@article {pmid40383775, year = {2025}, author = {Sen, S and Vairagare, I and Gosai, J and Shrivastava, A}, title = {RABiTPy: an open-source Python software for rapid, AI-powered bacterial tracking and analysis.}, journal = {BMC bioinformatics}, volume = {26}, number = {1}, pages = {127}, pmid = {40383775}, issn = {1471-2105}, support = {R35GM147131/GM/NIGMS NIH HHS/United States ; }, mesh = {*Software ; *Artificial Intelligence ; *Image Processing, Computer-Assisted/methods ; *Bacteria ; }, abstract = {Bacterial tracking is crucial for understanding the mechanisms governing motility, chemotaxis, cell division, biofilm formation, and pathogenesis. Although modern microscopy and computing have enabled the collection of large datasets, many existing tools struggle with big data processing or with accurately detecting, segmenting, and tracking bacteria of various shapes. To address these issues, we developed RABiTPy, an open-source Python software pipeline that integrates traditional and artificial intelligence-based segmentation with tracking tools within a user-friendly framework. RABiTPy runs interactively in Jupyter notebooks and supports numerous image and video formats. Users can select from adaptive, automated thresholding, or AI-based segmentation methods, fine-tuning parameters to fit their needs. The software offers customizable parameters to enhance tracking efficiency, and its streamlined handling of large datasets offers an alternative to existing tracking software by emphasizing usability and modular integration. RABiTPy supports GPU and CPU processing as well as cloud computing. It offers comprehensive spatiotemporal analyses that include trajectories, motile speeds, mean squared displacement, and turning angles, while providing a variety of visualization options. With its scalable and accessible platform, RABiTPy empowers researchers, even those with limited coding experience, to analyze bacterial physiology and behavior more effectively. By reducing technical barriers, this tool has the potential to accelerate discoveries in microbiology.}, } @article {pmid40380550, year = {2025}, author = {Haddad, T and Kumarapeli, P and de Lusignan, S and Barman, S and Khaddaj, S}, title = {A Sustainable Future in Digital Health: Leveraging Environmentally Friendly Architectural Tactics for Sustainable Data Processing.}, journal = {Studies in health technology and informatics}, volume = {327}, number = {}, pages = {713-717}, doi = {10.3233/SHTI250441}, pmid = {40380550}, issn = {1879-8365}, mesh = {*Big Data ; Humans ; Pilot Projects ; Digital Health ; }, abstract = {The rapid growth of big data in healthcare necessitates optimising data processing to reduce its environmental impact. This paper proposes a pilot architectural framework to evaluate the sustainability of a Big Healthcare Data (BHD) system using Microservices Architecture (MSA). The goal is to enhance MSA's architectural tactics by incorporating environmentally friendly metrics into healthcare systems. This is achieved by adopting energy and carbon efficiency models, alongside exploring innovative architectural strategies. The framework, based on recent research, manipulates cloud-native system architecture by using a controller to adjust microservice deployment through real-time monitoring and modelling.
This approach demonstrates how sustainability-driven metrics can be applied at different abstraction levels to estimate environmental impact from multiple perspectives.}, } @article {pmid40372277, year = {2025}, author = {Radovanovic, D and Zanforlin, A and Smargiassi, A and Cinquini, S and Inchingolo, R and Tursi, F and Soldati, G and Carlucci, P}, title = {CHEst PHysical Examination integrated with UltraSound - Phase (CHEPHEUS1). A survey of Accademia di Ecografia Toracica (AdET).}, journal = {Multidisciplinary respiratory medicine}, volume = {20}, number = {}, pages = {}, doi = {10.5826/mrm.2025.1020}, pmid = {40372277}, issn = {1828-695X}, abstract = {BACKGROUND: Chest physical exam (CPE) is based on the four pillars of classical semiotics. However, CPE's sensitivity and specificity are low, and both are affected by operators' skills. The aim of this work was to explore the contribution of chest ultrasound (US) to the traditional CPE.

METHODS: For this purpose, a survey was submitted to US users. They were asked to rate the usefulness of classical semiotics and chest US in evaluating each item of the CPE pillars. The study was conducted and described according to the STROBE checklist. The study used the freely available cloud-based online survey application (Google Forms, Google Ireland Ltd, Mountain View, CA, USA).

RESULTS: The results showed a tendency to prefer chest US to palpation and percussion, suggesting a possible future approach based on inspection, auscultation and palpatory ultrasound evaluation.

CONCLUSION: The results of our survey introduce, for the first time, the role of ultrasound as a pillar of physical examination. Our project CHEPHEUS aims to study and propose a new way of performing the physical exam in the future.}, } @article {pmid40366511, year = {2025}, author = {Gulkesen, KH and Sonuvar, ET}, title = {Data Privacy in Medical Informatics and Electronic Health Records: A Bibliometric Analysis.}, journal = {Health care analysis : HCA : journal of health philosophy and policy}, volume = {}, number = {}, pages = {}, pmid = {40366511}, issn = {1573-3394}, abstract = {This study aims to evaluate scientific publications on "Medical Informatics" and "Data Privacy" using a bibliometric approach to identify research trends, the most studied topics, and the countries and institutions with the highest publication output. The search was carried out utilizing the WoS Clarivate Analytics tool across SCIE journals. Subsequently, text mining, keyword clustering, and data visualization were applied through the use of VOSviewer and Tableau Desktop software. Between 1975 and 2023, a total of 7,165 articles were published on the topic of data privacy. The number of articles has been increasing each year. The text mining and clustering analysis identified eight main clusters in the literature: (1) Mobile Health/Telemedicine/IoT, (2) Security/Encryption/Authentication, (3) Big Data/AI/Data Science, (4) Anonymization/Digital Phenotyping, (5) Genomics/Biobank, (6) Ethics, (7) Legal Issues, (8) Cloud Computing. On a country basis, the United States was identified as the most active country in this field, producing the most publications and receiving the highest number of citations. China, the United Kingdom, Canada, and Australia also emerged as significant countries. Among these clusters, "Mobile Health/Telemedicine/IoT," "Security/Encryption/Authentication," and "Cloud Computing" technologies stood out as the most prominent and extensively studied topics in the intersection of medical informatics and data privacy.}, } @article {pmid40363312, year = {2025}, author = {Hirsch, M and Mateos, C and Majchrzak, TA}, title = {Exploring Smartphone-Based Edge AI Inferences Using Real Testbeds.}, journal = {Sensors (Basel, Switzerland)}, volume = {25}, number = {9}, pages = {}, doi = {10.3390/s25092875}, pmid = {40363312}, issn = {1424-8220}, support = {PIBAA-28720210101298CO//Centro Científico Tecnológico - Tandil/ ; PIP11220210100138CO//Centro Científico Tecnológico - Tandil/ ; }, abstract = {The increasing availability of lightweight pre-trained models and AI execution frameworks is causing edge AI to become ubiquitous. Particularly, deep learning (DL) models are being used in computer vision (CV) for performing object recognition and image classification tasks in various application domains requiring prompt inferences. Regarding edge AI task execution platforms, some approaches show a strong dependency on cloud resources to complement the computing power offered by local nodes. Other approaches distribute workload horizontally, i.e., by harnessing the power of nearby edge nodes. Many of these efforts experiment with real settings comprising SBC (Single-Board Computer)-like edge nodes only, but few of these consider nomadic hardware such as smartphones.
Given the huge popularity of smartphones worldwide and the unlimited scenarios where smartphone clusters could be exploited for providing computing power, this paper sheds some light on the following question: Is smartphone-based edge AI a competitive approach for real-time CV inferences? To empirically answer this, we use three pre-trained DL models and eight heterogeneous edge nodes including five low/mid-end smartphones and three SBCs, and compare the performance achieved using workloads from three image stream processing scenarios. Experiments were run with the help of a toolset designed for reproducing battery-driven edge computing tests. We compared latency and energy efficiency achieved by using either several smartphone cluster testbeds or SBCs only. Additionally, for battery-driven settings, we include metrics to measure how workload execution impacts smartphone battery levels. As per the computing capability shown in our experiments, we conclude that edge AI based on smartphone clusters can help in providing valuable resources to contribute to the expansion of edge AI in application scenarios requiring real-time performance.}, } @article {pmid40363125, year = {2025}, author = {Mo, Y and Chen, P and Zhou, W and Chen, W}, title = {Enhanced Cloud Detection Using a Unified Multimodal Data Fusion Approach in Remote Images.}, journal = {Sensors (Basel, Switzerland)}, volume = {25}, number = {9}, pages = {}, doi = {10.3390/s25092684}, pmid = {40363125}, issn = {1424-8220}, support = {62261038//National Natural Science Foundation of China/ ; }, abstract = {Aiming at the complexity of network architecture design and the low computational efficiency caused by variations in the number of modalities in multimodal cloud detection tasks, this paper proposes an efficient and unified multimodal cloud detection model, M2Cloud, which can process any number of modalities. The core innovation of M2Cloud lies in its novel multimodal data fusion method. This method avoids architectural changes for new modalities, thereby significantly reducing incremental computing costs and enhancing overall efficiency. Furthermore, the designed multimodal data fusion module possesses strong generalization capabilities and can be seamlessly integrated into other network architectures in a plug-and-play manner, greatly enhancing the module's practicality and flexibility. To address the challenge of unified multimodal feature extraction, we adopt two key strategies: (1) constructing feature extraction modules with shared but independent weights for each modality to preserve the inherent features of each modality; (2) utilizing cosine similarity to adaptively learn complementary features between different modalities, thereby reducing redundant information. Experimental results demonstrate that M2Cloud achieves or even surpasses the state-of-the-art (SOTA) performance on the public multimodal datasets WHUS2-CD and WHUS2-CD+, verifying its effectiveness in the unified multimodal cloud detection task.
The research presented in this paper offers new insights and technical support for the field of multimodal data fusion and cloud detection, and holds significant theoretical and practical value.}, } @article {pmid40363089, year = {2025}, author = {Jilcha, LA and Kim, DH and Kwak, J}, title = {Temporal Decay Loss for Adaptive Log Anomaly Detection in Cloud Environments.}, journal = {Sensors (Basel, Switzerland)}, volume = {25}, number = {9}, pages = {}, doi = {10.3390/s25092649}, pmid = {40363089}, issn = {1424-8220}, support = {NRF: No. 2021R1A2C2011391 and IITP: No.2024-00400302//National Research Foundation of Korea (NRF) grant funded by the Korea government (MSIT), and Institute of Information & Communications Technology Planning & Evaluation (IITP) grant funded by the Korea government (MSIT)/ ; }, abstract = {Log anomaly detection in cloud computing environments is essential for maintaining system reliability and security. While sequence modeling architectures such as LSTMs and Transformers have been widely employed to capture temporal dependencies in log messages, their effectiveness deteriorates in zero-shot transfer scenarios due to distributional shifts in log structures, terminology, and event frequencies, as well as minimal token overlap across datasets. To address these challenges, we propose an effective detection approach integrating a domain-specific pre-trained language model (PLM) fine-tuned on cybersecurity-adjacent data with a novel loss function, Loss with Decaying Factor (LDF). LDF introduces an exponential time decay mechanism into the training objective, ensuring a dynamic balance between historical context and real-time relevance. Unlike traditional sequence models that often overemphasize outdated information and impose high computational overhead, LDF constrains the training process by dynamically weighing log messages based on their temporal proximity, thereby aligning with the rapidly evolving nature of cloud computing environments. Additionally, the domain-specific PLM mitigates semantic discrepancies by improving the representation of log data across heterogeneous datasets. Extensive empirical evaluations on two supercomputing log datasets demonstrate that this approach substantially enhances cross-dataset anomaly detection performance. The main contributions of this study include: (1) the introduction of a Loss with Decaying Factor (LDF) to dynamically balance historical context with real-time relevance; and (2) the integration of a domain-specific PLM for enhancing generalization in zero-shot log anomaly detection across heterogeneous cloud environments.}, } @article {pmid40360639, year = {2025}, author = {R, D and T S, PK}, title = {Dual level dengue diagnosis using lightweight multilayer perceptron with XAI in fog computing environment and rule based inference.}, journal = {Scientific reports}, volume = {15}, number = {1}, pages = {16548}, pmid = {40360639}, issn = {2045-2322}, mesh = {*Dengue/diagnosis ; Humans ; *Cloud Computing ; Dengue Virus ; *Neural Networks, Computer ; Machine Learning ; Deep Learning ; Algorithms ; Multilayer Perceptrons ; }, abstract = {Over the last fifty years, arboviral infections have made an unparalleled contribution to worldwide disability and morbidity. Globalization, population growth, and unplanned urbanization are the main causes. Dengue is regarded as the most significant arboviral illness among them due to its prior dominance in growth. The dengue virus is mostly transmitted to humans by Aedes mosquitoes. 
The human body infected with dengue virus (DenV) will experience certain adverse impacts. To keep the disease under control, some of the preventative measures implemented by different countries need to be updated. Manual diagnosis is typically employed, and the accuracy of the diagnosis is assessed based on the experience of the healthcare professionals. Because there are so many patients during an outbreak, diagnostic errors also occur. Remote monitoring and massive data storage are required. Though cloud computing is one of the solutions, it has a significant latency, despite its potential for remote monitoring and storage. Also, the diagnosis should be made as quickly as possible. The aforementioned issue has been resolved with fog computing, which significantly lowers latency and facilitates remote diagnosis. This study especially focuses on incorporating machine learning and deep learning techniques in the fog computing environment to leverage the overall diagnostic efficiency of dengue by promoting remote diagnosis and speedy treatment. A dual-level dengue diagnosis framework has been proposed in this study. Level-1 diagnosis is based on the symptoms of the patients, which are sent from the edge layer to the fog. Level-1 diagnosis is done in the fog to manage the storage and computation issues. An optimized and normalized lightweight MLP has been proposed along with preprocessing and feature reduction techniques in this study for the Level-1 diagnosis in the fog computing environment. The Pearson correlation coefficient has been calculated between independent and target features to aid in feature reduction. Techniques like K-fold cross-validation, batch normalization, and grid search optimization have been used to increase efficiency. A variety of metrics have been computed to assess the effectiveness of the model. Since the suggested model is a "black box," explainable artificial intelligence (XAI) tools such as SHAP and LIME have been used to help explain its predictions. An exceptional accuracy of 92% is attained with the small dataset using the proposed model. Also, a precision of 100% and an F1 score of 90% have been attained using the proposed model. The list of probable cases is sent from the fog layer to the edge layer, where the Level-2 diagnosis is carried out. Level-2 diagnosis is based on the serological test report of the suspected patients of the Level-1 diagnosis. Level-2 diagnosis is done at the edge using the rule-based inference method. This study incorporates dual-level diagnosis, which is not seen in recent studies. The majority of investigations end at Level 1. However, this study minimizes incorrect treatment and fatality rates by using dual-level diagnosis and assisting in confirmation of the disease.}, } @article {pmid40359310, year = {2025}, author = {Kotan, M and Faruk Seymen, Ö and Çallı, L and Kasım, S and Çarklı Yavuz, B and Över Özçelik, T}, title = {A novel methodological approach to SaaS churn prediction using whale optimization algorithm.}, journal = {PloS one}, volume = {20}, number = {5}, pages = {e0319998}, doi = {10.1371/journal.pone.0319998}, pmid = {40359310}, issn = {1932-6203}, mesh = {*Algorithms ; *Software ; *Cloud Computing ; Bayes Theorem ; Neural Networks, Computer ; Decision Trees ; Whales ; }, abstract = {Customer churn is a critical concern in the Software as a Service (SaaS) sector, potentially impacting long-term growth within the cloud computing industry.
The scarcity of research on customer churn models in SaaS, particularly regarding diverse feature selection methods and predictive algorithms, highlights a significant gap. Addressing this would enhance academic discourse and provide essential insights for managerial decision-making. This study introduces a novel approach to SaaS churn prediction using the Whale Optimization Algorithm (WOA) for feature selection. Results show that WOA-reduced datasets improve processing efficiency and outperform full-variable datasets in predictive performance. The study encompasses a range of prediction techniques with three distinct datasets, derived from over 1,000 users of a multinational SaaS company: the WOA-reduced dataset, the full-variable dataset, and the chi-squared-derived dataset. These three datasets were examined with the techniques most used in the literature (k-nearest neighbor, Decision Trees, Naïve Bayes, Random Forests, and Neural Networks), and performance metrics such as Area Under Curve, Accuracy, Precision, Recall, and F1 Score were used to measure classification success. The results demonstrate that the WOA-reduced dataset outperformed the full-variable and chi-squared-derived datasets regarding performance metrics.}, } @article {pmid40353020, year = {2025}, author = {Al-Rubaie, A}, title = {From Cadavers to Codes: The Evolution of Anatomy Education Through Digital Technologies.}, journal = {Medical science educator}, volume = {35}, number = {2}, pages = {1101-1109}, doi = {10.1007/s40670-024-02268-6}, pmid = {40353020}, issn = {2156-8650}, abstract = {This review examines the shift from traditional anatomy education to the integration of advanced digital technologies. With rapid advancements in digital tools, such as 3D models, virtual dissections, augmented reality (AR) and virtual reality (VR), anatomy education is increasingly adopting digital environments to enhance learning. These tools offer immersive, interactive experiences, supporting active learning and knowledge retention. Mobile technology and cloud computing have further increased accessibility, allowing flexible, self-paced learning. Despite challenges like educator resistance and institutional barriers, the continued innovation and integration of digital tools have the potential to transform anatomy education and improve medical outcomes.}, } @article {pmid40352432, year = {2025}, author = {Lounissi, E and Das, SK and Peter, R and Zhang, X and Ray, S and Jia, L}, title = {FunDa: scalable serverless data analytics and in situ query processing.}, journal = {Journal of big data}, volume = {12}, number = {1}, pages = {116}, doi = {10.1186/s40537-025-01141-6}, pmid = {40352432}, issn = {2196-1115}, abstract = {The pay-what-you-use model of serverless Cloud computing (or serverless, for short) offers significant benefits to the users. This computing paradigm is ideal for short-running ephemeral tasks; however, it is not suitable for stateful, long-running tasks, such as complex data analytics and query processing. We propose FunDa, an on-premises serverless data analytics framework, which extends our previously proposed system for unified data analytics and in situ SQL query processing called DaskDB. Unlike existing serverless solutions, which struggle with stateful and long running data analytics tasks, FunDa overcomes their limitations.
Our ongoing research focuses on developing a robust architecture for FunDa, enabling true serverless operation in on-premises environments, while being able to operate on a public Cloud, such as AWS Cloud. We have evaluated our system on several benchmarks with different scale factors. Our experimental results in both on-premises and AWS Cloud settings demonstrate FunDa's ability to support automatic scaling, low-latency execution of data analytics workloads, and greater flexibility for serverless users.}, } @article {pmid40347833, year = {2025}, author = {Wertheim, JO and Vasylyeva, TI and Wood, RJ and Cantrell, K and Contreras, SP and Feldheim, A and Goyal, R and Havens, JL and Knight, R and Laurent, LC and Moshiri, N and Neuhard, R and Sathe, S and Satterlund, A and Scioscia, A and Song, AY and Schooley, RT and Anderson, CM and Martin, NK}, title = {Phylogeographic and genetic network assessment of COVID-19 mitigation protocols on SARS-CoV-2 transmission in university campus residences.}, journal = {EBioMedicine}, volume = {116}, number = {}, pages = {105729}, doi = {10.1016/j.ebiom.2025.105729}, pmid = {40347833}, issn = {2352-3964}, abstract = {BACKGROUND: Congregate living provides an ideal setting for SARS-CoV-2 transmission in which many outbreaks and superspreading events occurred. To avoid large outbreaks, universities turned to remote operations during the initial COVID-19 pandemic waves in 2020 and 2021. In late 2021, the University of California San Diego (UC San Diego) facilitated the return of students to campus with comprehensive testing, vaccination, masking, wastewater surveillance, and isolation policies.

METHODS: We performed molecular epidemiological and phylogeographic analysis of 4418 SARS-CoV-2 genomes sampled from UC San Diego students during the Omicron waves between December 2021 and September 2022, representing 58% of students with confirmed SARS-CoV-2 infection. We overlaid these analyses onto on-campus residential information to assess the spread and persistence of SARS-CoV-2 within university residences.
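For illustration only: genetic network analyses of this kind typically link infections whose pairwise genetic distance falls below a fixed threshold and read transmission clusters off the connected components. The sketch below is not the authors' pipeline; the genome labels, distances, and the 0.001 substitutions/site cutoff are hypothetical placeholders.

```python
# Hypothetical sketch: threshold-based transmission clustering via union-find.
from collections import defaultdict

# made-up pairwise genetic distances (substitutions/site) between genomes
pairwise = {("A", "B"): 0.0002, ("B", "C"): 0.0009, ("C", "D"): 0.0150}
THRESHOLD = 0.001  # illustrative linkage cutoff, not the study's value

parent = {}

def find(x):
    parent.setdefault(x, x)
    while parent[x] != x:
        parent[x] = parent[parent[x]]  # path halving
        x = parent[x]
    return x

def union(x, y):
    parent[find(x)] = find(y)

for (u, v), dist in pairwise.items():
    if dist <= THRESHOLD:
        union(u, v)          # close enough: same putative cluster
    else:
        find(u); find(v)     # register both as singletons

clusters = defaultdict(set)
for node in list(parent):
    clusters[find(node)].add(node)
print(list(clusters.values()))  # -> [{'A', 'B', 'C'}, {'D'}]
```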

FINDINGS: Within campus residences, SARS-CoV-2 transmission was frequent among students residing in the same room or suite. However, a quarter of pairs of suitemates with concurrent infections had distantly related viruses, suggesting separate sources of infection during periods of high incidence in the surrounding community. Students with concurrent infections residing in the same building were not at a substantially increased probability of being members of the same transmission cluster. Genetic network and phylogeographic inference indicated that only between 3.1% and 12.4% of infections among students could be associated with transmission within buildings outside of individual suites. The only super-spreading event we detected was related to a large event outside campus residences.

INTERPRETATION: We found little evidence for sustained SARS-CoV-2 transmission within individual buildings, aside from students who resided in the same suite. Even in the face of heightened community transmission during the 2021-2022 Omicron waves, congregate living did not result in a heightened risk for SARS-CoV-2 transmission in the context of the multi-pronged mitigation strategy.

FUNDING: SEARCH Alliance: Centers for Disease Control and Prevention (CDC) BAA (75D301-22-R-72097) and the Google Cloud Platform Research Credits Program. J.O.W.: NIH-NIAID (R01 AI135992). T.I.V.: Branco Weiss Fellowship and Newkirk Fellowship. L.L.: University of California San Diego.}, } @article {pmid40346106, year = {2025}, author = {Song, Y}, title = {Privacy-preserving and verifiable spectral graph analysis in the cloud.}, journal = {Scientific reports}, volume = {15}, number = {1}, pages = {16237}, pmid = {40346106}, issn = {2045-2322}, abstract = {Resorting to cloud computing for spectral graph analysis on large-scale graph data is becoming increasingly popular. However, given the intrusive and opaque nature of cloud services, privacy and the risk of a misbehaving cloud that returns incorrect results have raised serious concerns. Current schemes are proposed for privacy alone under the semi-honest model, while disregarding the realistic threat posed by a misbehaving cloud that might skip computationally intensive operations for economic gain. Additionally, existing verifiable computation techniques prove inadequate for the specialized requirements of spectral graph analysis, either due to compatibility issues with privacy-preserving protocols or the excessive computational burden they impose on resource-constrained users. To tackle the above two issues in a holistic solution, we present, tailor, and evaluate PVG, a privacy-preserving and verifiable framework for spectral graph analytics in the cloud for the first time. PVG concentrates on the eigendecomposition process, and provides strong privacy for graph data while enabling users to validate the accuracy of the outcomes yielded by the cloud. For this, we first design a new additive publicly verifiable computation algorithm, APVC, that can verify the accuracy of the result of the core operation (matrix multiplication) in eigendecomposition returned by cloud servers. We then propose three secure and verifiable functions for eigendecomposition based on APVC and lightweight cryptography. Extensive experiments on three manually generated and two real-world social graph datasets indicate that PVG's accuracy is consistent with plaintext, with practically affordable performance superior to prior art.}, } @article {pmid40343041, year = {2025}, author = {Siddiqui, N and Lee, B and Yi, V and Farek, J and Khan, Z and Kalla, SE and Wang, Q and Walker, K and Meldrim, J and Kachulis, C and Gatzen, M and Lennon, NJ and Mehtalia, S and Catreux, S and Mehio, R and Gibbs, RA and Venner, E}, title = {Celeste: A cloud-based genomics infrastructure with variant-calling pipeline suited for population-scale sequencing projects.}, journal = {medRxiv : the preprint server for health sciences}, volume = {}, number = {}, pages = {}, doi = {10.1101/2025.04.29.25326690}, pmid = {40343041}, abstract = {BACKGROUND: The All of Us Research Program (All of Us) is one of the world's largest sequencing efforts that will generate genetic data for over one million individuals from diverse backgrounds. This historic megaproject will create novel research platforms that integrate an unprecedented amount of genetic data with longitudinal health information. Here, we describe the design of Celeste, a resilient, open-source cloud architecture for implementing genomics workflows that has successfully analyzed petabytes of participant genomic information for All of Us, thereby enabling other large-scale sequencing efforts with a comprehensive set of tools to power analysis.
The Celeste infrastructure is tremendously scalable and has routinely processed fluctuating workloads of up to 9,000 whole-genome sequencing (WGS) samples for All of Us monthly. It also lends itself to multiple projects. Serverless technology and container orchestration form the basis of Celeste's system for managing this volume of data.
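As a rough illustration of the message-coordinated serverless pattern this abstract describes, a dispatcher along the following lines could enqueue per-sample analysis jobs for Lambda-style consumers. This is not Celeste's actual code: the queue URL, message fields, and sample IDs are hypothetical; only the boto3 SQS calls are standard.

```python
# Hypothetical sketch of queue-driven job coordination (not Celeste's code).
import json
import boto3

sqs = boto3.client("sqs", region_name="us-east-1")
QUEUE_URL = "https://sqs.us-east-1.amazonaws.com/123456789012/wgs-jobs"  # placeholder

def enqueue_sample(sample_id: str, s3_uri: str) -> None:
    """Publish one WGS analysis job; a serverless consumer would pick it up."""
    sqs.send_message(
        QueueUrl=QUEUE_URL,
        MessageBody=json.dumps({
            "sample_id": sample_id,
            "input": s3_uri,
            "steps": ["align", "call_variants", "qc"],  # illustrative step names
        }),
    )

for sid in ["S0001", "S0002"]:
    enqueue_sample(sid, f"s3://example-bucket/fastq/{sid}/")
```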

RESULTS: In 12 months of production (within a single Amazon Web Services (AWS) Region), around 200 million serverless functions and over 20 million messages coordinated the analysis of 1.8 million bioinformatics, quality control, and clinical reporting jobs. Adapting WGS analysis to clinical projects requires tuning variant-calling methods to enrich for the reliable detection of variants with known clinical importance. Thus, we also share the process by which we tuned the variant-calling pipeline in use by the multiple genome centers supporting All of Us to maximize precision and accuracy for low-fraction variant calls with clinical significance.
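For intuition about the tuning target, precision and recall against a truth set are the usual bookkeeping when calibrating a variant caller. The toy example below uses made-up call sets keyed by chrom/pos/ref/alt; real evaluations compare VCFs (e.g., with hap.py) rather than string tuples.

```python
# Illustrative only: precision/recall/F1 for variant calls vs. a truth set.
truth = {("chr1", 12345, "A", "G"), ("chr1", 22222, "C", "T"), ("chr2", 555, "G", "A")}
calls = {("chr1", 12345, "A", "G"), ("chr2", 555, "G", "A"), ("chr3", 77, "T", "C")}

tp = len(truth & calls)   # variants both called and present in the truth set
fp = len(calls - truth)   # called but absent from the truth set
fn = len(truth - calls)   # truth variants the caller missed

precision = tp / (tp + fp)                       # 2 / 3
recall = tp / (tp + fn)                          # 2 / 3
f1 = 2 * precision * recall / (precision + recall)
print(f"precision={precision:.2f} recall={recall:.2f} F1={f1:.2f}")
```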

CONCLUSIONS: When combined with hardware-accelerated implementations for genomic analysis, Celeste had far-reaching, positive implications for turnaround time, dynamic scalability, security, and storage of analysis for one hundred thousand whole-genome samples and counting. Other groups may align their sequencing workflows to this harmonized pipeline standard, included within the Celeste framework, to meet clinical requisites for population-scale sequencing efforts. Celeste is available as an Amazon Web Services (AWS) deployment on GitHub, and includes command-line parameters and software containers.}, } @article {pmid40341961, year = {2025}, author = {Adams, JI and Kutschera, E and Hu, Q and Liu, CJ and Liu, Q and Kadash-Edmondson, K and Liu, S and Xing, Y}, title = {rMATS-cloud: Large-scale Alternative Splicing Analysis in the Cloud.}, journal = {Genomics, proteomics & bioinformatics}, volume = {}, number = {}, pages = {}, doi = {10.1093/gpbjnl/qzaf036}, pmid = {40341961}, issn = {2210-3244}, abstract = {Although gene expression analysis pipelines are often a standard part of bioinformatics analysis, with many publicly available cloud workflows, cloud-based alternative splicing analysis tools remain limited. Our lab released rMATS in 2014 and has continuously maintained it, providing a fast and versatile solution for quantifying alternative splicing from RNA sequencing (RNA-seq) data. Here, we present rMATS-cloud, a portable version of the rMATS workflow that can be run in virtually any cloud environment suited for biomedical research. We compared the time and cost of running rMATS-cloud with two RNA-seq datasets on three different platforms (Cavatica, Terra, and Seqera). Our findings demonstrate that rMATS-cloud handles RNA-seq datasets with thousands of samples, and therefore is ideally suited for the storage capacities of many cloud data repositories. rMATS-cloud is available at https://dockstore.org/workflows/github.com/Xinglab/rmats-turbo/rmats-turbo-cwl, https://dockstore.org/workflows/github.com/Xinglab/rmats-turbo/rmats-turbo-wdl, and https://dockstore.org/workflows/github.com/Xinglab/rmats-turbo/rmats-turbo-nextflow.}, } @article {pmid40338714, year = {2025}, author = {Lee, H and Lee, S and Lee, S}, title = {Visibility-Aware Multi-View Stereo by Surface Normal Weighting for Occlusion Robustness.}, journal = {IEEE transactions on pattern analysis and machine intelligence}, volume = {PP}, number = {}, pages = {}, doi = {10.1109/TPAMI.2025.3568447}, pmid = {40338714}, issn = {1939-3539}, abstract = {Recent learning-based multi-view stereo (MVS) still exhibits insufficient accuracy in large occlusion cases, such as environments with significant inter-camera distance or when capturing objects with complex shapes. This is because incorrect image features extracted from occluded areas serve as significant noise in the cost volume construction. To address this, we propose a visibility-aware MVS using surface normal weighting (SnowMVSNet) based on explicit 3D geometry. It selectively suppresses mismatched features in the cost volume construction by computing inter-view visibility. Additionally, we present a geometry-guided cost volume regularization that enhances true depth among depth hypotheses using a surface normal prior. We also propose intra-view visibility that distinguishes geometrically more visible pixels within a reference view. Using intra-view visibility, we introduce the visibility-weighted training and depth estimation methods.
These methods enable the network to achieve accurate 3D point cloud reconstruction by focusing on visible regions. Based on simple inter-view and intra-view visibility computations, SnowMVSNet accomplishes substantial performance improvements relative to computational complexity, particularly in terms of occlusion robustness. To evaluate occlusion robustness, we constructed a multi-view human (MVHuman) dataset containing general human body shapes prone to self-occlusion. Extensive experiments demonstrated that SnowMVSNet significantly outperformed state-of-the-art methods in both low- and high-occlusion scenarios.}, } @article {pmid40337922, year = {2025}, author = {Mareuil, F and Torchet, R and Ruano, LC and Mallet, V and Nilges, M and Bouvier, G and Sperandio, O}, title = {InDeepNet: a web platform for predicting functional binding sites in proteins using InDeep.}, journal = {Nucleic acids research}, volume = {}, number = {}, pages = {}, doi = {10.1093/nar/gkaf403}, pmid = {40337922}, issn = {1362-4962}, support = {//Dassault Systèmes La Fondation/ ; PFR7//Fondation de France/ ; }, abstract = {Predicting functional binding sites in proteins is crucial for understanding protein-protein interactions (PPIs) and identifying drug targets. While various computational approaches exist, many fail to assess PPI ligandability, which often involves conformational changes. We introduce InDeepNet, a web-based platform integrating InDeep, a deep-learning model for binding site prediction, with InDeepHolo, which evaluates a site's propensity to adopt a ligand-bound (holo) conformation. InDeepNet provides an intuitive interface for researchers to upload protein structures from in-house data, the Protein Data Bank (PDB), or AlphaFold, predicting potential binding sites for proteins or small molecules. Results are presented as interactive 3D visualizations via Mol*, facilitating structural analysis. With InDeepHolo, the platform helps select conformations optimal for small-molecule binding, improving structure-based drug design. Accessible at https://indeep-net.gpu.pasteur.cloud/, InDeepNet removes the need for specialized coding skills or high-performance computing, making advanced predictive models widely available. By streamlining PPI target assessment and ligandability prediction, it assists research and supports therapeutic development targeting PPIs.}, } @article {pmid40336969, year = {2025}, author = {Tamantini, C and Marra, F and Di Tocco, J and Di Modica, S and Lanata, A and Cordella, F and Ferrarin, M and Rizzo, F and Stefanelli, M and Papacchini, M and Delle Site, C and Tamburrano, A and Massaroni, C and Schena, E and Zollo, L and Sarto, MS}, title = {SenseRisc: An instrumented smart shirt for risk prevention in the workplace.}, journal = {Wearable technologies}, volume = {6}, number = {}, pages = {e20}, doi = {10.1017/wtc.2025.10}, pmid = {40336969}, issn = {2631-7176}, abstract = {The integration of wearable smart garments with multiple sensors has gained momentum, enabling real-time monitoring of users' vital parameters across various domains. This study presents the development and validation of an instrumented smart shirt for risk prevention in workplaces designed to enhance worker safety and well-being in occupational settings. The proposed smart shirt is equipped with sensors for collecting electrocardiogram, respiratory waveform, and acceleration data, with signal conditioning electronics and Bluetooth transmission to the mobile application. 
The mobile application sends the data to the cloud platform for subsequent Preventive Risk Index (PRI) extraction. The proposed SenseRisc system was validated with eight healthy participants during the execution of different physically exerting activities, to assess the capability of the system to capture physiological parameters and estimate the worker's PRI, and to assess users' subjective perception of the instrumented smart shirt.}, } @article {pmid40335531, year = {2025}, author = {Baskar, R and Mohanraj, E}, title = {Hybrid multi objective marine predators algorithm based clustering for lightweight resource scheduling and application placement in fog.}, journal = {Scientific reports}, volume = {15}, number = {1}, pages = {15953}, pmid = {40335531}, issn = {2045-2322}, abstract = {The Internet of Things (IoT) has boosted fog computing, which complements the cloud. This is critical for applications that need close user proximity. Efficient allocation of IoT applications to the fog, as well as fog device scheduling, enables the realistic execution of IoT application deployment in the fog environment. The scheduling difficulties are multi-objective in nature, since they must handle the issues of avoiding resource waste, network latency, and maximising Quality of Service (QoS) on fog nodes. In this research, the Hybrid Multi-Objective Marine Predators Algorithm-based Clustering and Fog Picker (HMMPACFP) Technique is developed as a combinatorial model for tackling the problem of fog node allocation, with the goal of achieving dynamic scheduling using lightweight characteristics. Fog Picker is utilised to allocate IoT components to fog nodes based on QoS parameters. Simulation trials of the proposed HMMPACFP scheme utilising jMetal and iFogSim with Hypervolume (HV) and Inverted Generational Distance (IGD) demonstrated its superiority over the benchmarked methodologies utilised for evaluation. The combination of Fog Picker with the suggested HMMPACFP scheme resulted in 32.18% faster convergence, 26.92% more solution variety, and a better balance between exploration and exploitation rates.}, } @article {pmid40325903, year = {2025}, author = {Mohanty, S and Pandey, PC}, title = {Spatiotemporal dynamics of Ramsar wetlands and freshwater resources: Technological innovations for ecosystem conservation.}, journal = {Water environment research : a research publication of the Water Environment Federation}, volume = {97}, number = {5}, pages = {e70072}, doi = {10.1002/wer.70072}, pmid = {40325903}, issn = {1554-7531}, support = {NGP/TPN-30705/2019(G)//National Geospatial Program/ ; }, mesh = {*Wetlands ; *Environmental Monitoring ; *Fresh Water ; Water Quality ; *Conservation of Natural Resources/methods ; }, abstract = {Aquatic ecosystems, particularly wetlands, are vulnerable to natural and anthropogenic influences. This study examines the Saman Bird Sanctuary and Keetham Lake, both Ramsar sites, using advanced remote sensing for water occurrence, land use and land cover (LULC), and water quality assessments. Sentinel data, processed via cloud computing, enabled land-use classification, water boundary delineation, and seasonal water occurrence mapping. A combination of the Modified Normalized Difference Water Index (MNDWI), OTSU threshold segmentation, and Canny edge detection methods allowed for precise delineation of seasonal water boundaries.
Sixteen water quality parameters including pH, turbidity, dissolved oxygen (DO), chemical oxygen demand (COD), total hardness (TH), total alkalinity (TA), total dissolved solids (TDS), electrical conductivity (EC), phosphates (PO₄), nitrate (NO₃), chloride (Cl⁻), fluoride (F⁻), carbon dioxide (CO₂), silica (Si), iodine (I⁻), and chromium (Cr⁻) were analyzed and compared for both sites. Results showed significant LULC changes, particularly at Saman, with scrub forest, built-up areas, and agriculture increasing, while flooded vegetation and open water declined. Significant LULC changes were observed near the marsh wetland, where positive changes of up to 42.17% were seen for built-up areas in surrounding regions, with an increase to 5.43 ha in 2021 from 3.14 ha in 2017. Positive change was observed for scrub forests up to 21.02%, with a rise of 2.18 ha. Vegetation in the marsh region, including seasonal grasses and hydrophytes, has shown an increase in extent of up to 0.39 ha, a rise of 7.12%. Spatiotemporal water occurrence was analyzed across pre-monsoon, monsoon, and post-monsoon seasons using Sentinel-1 data. The study highlights the role of remote sensing and field-based water quality monitoring in understanding ecological shifts and anthropogenic pressures on wetlands. By integrating land-use changes and water quality analysis, this research provides critical information for planning and conservation efforts. It provides vital insights for conservation planning, advocating for continued monitoring and adaptive management to sustain these critical ecosystems. PRACTITIONER POINTS: Spatiotemporal surface water occurrence at two geographically different wetlands (lake and marsh wetland); LULC and its change analysis to evaluate the impact on wetlands and their surrounding environment (positive and negative changes); Boundary delineation to examine changes and identify low-lying areas during the pre- and post-monsoon; Comparative analysis of the water quality of two different wetlands; The insectivorous plant Utricularia stellaris was recorded from Northern India at the Saman Bird Sanctuary for the first time.}, } @article {pmid40319726, year = {2025}, author = {Xu, W and Althumayri, M and Tarman, AY and Ceylan Koydemir, H}, title = {An integrated wearable fluorescence sensor for E. coli detection in catheter bags.}, journal = {Biosensors & bioelectronics}, volume = {283}, number = {}, pages = {117539}, doi = {10.1016/j.bios.2025.117539}, pmid = {40319726}, issn = {1873-4235}, abstract = {Urinary tract infections (UTIs), including catheter-associated UTIs (CAUTIs), affect millions worldwide. Traditional diagnostic methods, like urinalysis and urine culture, have limitations: urinalysis is fast but lacks sensitivity, while urine culture is accurate but takes up to two days. Here, we present an integrated wearable fluorescence sensor to detect UTI-related bacterial infections early at the point of care by on-body monitoring. The sensor features a hardware platform with a flexible PCB that attaches to a urine catheter bag, emitting excitation light and detecting emission light of an E. coli-specific enzymatic reaction for continuous monitoring. Our custom-developed smartphone application allows remote control and data transfer via Bluetooth and performs in situ data analysis without cloud computing. The performance of the device was demonstrated by detecting E. coli at concentrations of 10⁰-10⁵ CFU/mL within 9 to 3.5 h, respectively, with high sensitivity, and by testing the specificity using Gram-positive (i.e., Staphylococcus epidermidis) and Gram-negative (i.e., Pseudomonas aeruginosa and Klebsiella pneumoniae) pathogens. In vitro bladder model testing was performed using E. coli-spiked human urine samples to further evaluate the device's practicality. This portable, cost-effective device has the potential to transform the clinical practice of UTI diagnosis with automated and rapid bacterial detection at the point of care.}, } @article {pmid40317230, year = {2025}, author = {Peacock, JG and Cole, R and Duncan, J and Jensen, B and Snively, B and Samuel, A}, title = {Transforming Military Healthcare Education and Training: AI Integration for Future Readiness.}, journal = {Military medicine}, volume = {}, number = {}, pages = {}, doi = {10.1093/milmed/usaf169}, pmid = {40317230}, issn = {1930-613X}, abstract = {INTRODUCTION: Artificial intelligence (AI) technologies have spread throughout the world and changed the way that many social functions are conducted, including health care. Future large-scale combat missions will likely require health care professionals to utilize AI tools among other tools in providing care for the Warfighter. Despite the need for an AI-capable health care force, medical education lacks integration of medical AI knowledge. The purpose of this manuscript was to review ways that military health care education can be improved with an understanding and use of AI technologies.

MATERIALS AND METHODS: This article is a review of the literature regarding the integration of AI technologies in medicine and medical education. We provide examples of quotes and images from a larger USU study on a Faculty Development program centered on learning about AI technologies in health care education. The study is not complete and is not the focus of this article, but it was approved by the USU IRB.

RESULTS: Effective integration of AI technologies in military health care education requires military health care educators who are willing to learn how to safely, effectively, and ethically use AI technologies in their own administrative, educational, research, and clinical roles. Together with health care trainees, these faculty members can help to build and co-create AI-integrated curricula that will accelerate and enhance the military health care curriculum of tomorrow. Trainees can begin to use generative AI tools, like large language models, to develop their skills and practice the art of generating high-quality AI tools that will improve their studies and prepare them to improve military health care. Integration of AI technologies in the military health care environment requires close military-industry collaborations with AI and security experts to ensure personal and health care information security. Through secure cloud computing, blockchain technologies, and Application Programming Interfaces, among other technologies, military health care facilities and systems can safely integrate AI technologies to enhance patient care, clinical research, and health care education.

CONCLUSIONS: AI technologies are not a dream of the future; they are here, and they are being integrated and implemented in military health care systems. To best prepare the military health care professionals of the future for the reality of medical AI, we must reform military health care education through a combined effort of faculty, students, and industry partners.}, } @article {pmid40315072, year = {2025}, author = {Liu, K and Liu, YJ and Chen, B}, title = {General 3D Vision-Language Model with Fast Rendering and Pre-training Vision-Language Alignment.}, journal = {IEEE transactions on pattern analysis and machine intelligence}, volume = {PP}, number = {}, pages = {}, doi = {10.1109/TPAMI.2025.3566593}, pmid = {40315072}, issn = {1939-3539}, abstract = {Deep neural network models have achieved remarkable progress in 3D scene understanding while trained in the closed-set setting and with full labels. However, the major bottleneck for the current 3D recognition approach is that these models do not have the capacity to recognize any unseen novel classes beyond the training categories in diverse real-world applications. In the meantime, current state-of-the-art 3D scene understanding approaches primarily require a large number of high-quality labels to train neural networks, which merely perform well in a fully supervised manner. Therefore, we are in urgent need of a framework that can simultaneously be applicable to both 3D point cloud segmentation and detection, particularly in the circumstances where the labels are rather scarce. This work presents a generalized and straightforward framework for dealing with 3D scene understanding when the labeled scenes are quite limited. To extract knowledge for novel categories from the pre-trained vision-language models, we propose a hierarchical feature-aligned pre-training and knowledge distillation strategy to extract and distill meaningful information from large-scale vision-language models, which helps benefit the open-vocabulary scene understanding tasks. To leverage the boundary information, we propose a novel energy-based loss with boundary awareness benefiting from the region-level boundary predictions. To encourage latent instance discrimination and to guarantee efficiency, we propose the unsupervised region-level semantic contrastive learning scheme for point clouds, using confident predictions of the neural network to discriminate the intermediate feature embeddings at multiple stages. In the limited reconstruction case, our proposed approach, termed WS3D++, ranks 1st on the large-scale ScanNet benchmark on both the tasks of semantic segmentation and instance segmentation. Also, our proposed WS3D++ achieves state-of-the-art data-efficient learning performance on the other large-scale real-scene indoor and outdoor datasets S3DIS and SemanticKITTI. Extensive experiments with both indoor and outdoor scenes demonstrated the effectiveness of our approach in both data-efficient learning and open-world few-shot learning. All codes, models, and data are to be made publicly available at: https://github.com/KangchengLiu.
The code is at: https://drive.google.com/drive/folders/1M58V-PtR8DBEwD296zJkNg_m2qq-MTAP.}, } @article {pmid40126006, year = {2025}, author = {Chellamuthu, S and Ramanathan, K and Arivanandhan, R}, title = {HUNHODRL: Energy efficient resource distribution in a cloud environment using hybrid optimized deep reinforcement model with HunterPlus scheduler.}, journal = {Network (Bristol, England)}, volume = {}, number = {}, pages = {1-26}, doi = {10.1080/0954898X.2025.2480294}, pmid = {40126006}, issn = {1361-6536}, abstract = {Resource optimization and workload balancing in cloud computing environments necessitate efficient management of resources to minimize energy wastage and SLA (Service Level Agreement) violations. The existing scheduling techniques often face challenges with dynamic resource allocations and lead to inefficient job completion rates and container utilizations. Hence, HUNHODRL, a new DRL-based framework, has been proposed to improve container orchestration and workload allocation. The framework was evaluated comparatively against HUNDRL, Bi-GGCN, and CNN methods under two sets of workloads, with datasets on CPU, memory, and disk I/O utilization metrics. The model optimizes scheduling choices in HUNHODRL through a combination of a destination host capacity vector and an active job utilization matrix. The experimental results show that HUNHODRL outperforms existing models in container creation rate, job completion rate, SLA violation reduction, and energy efficiency. It facilitates increased container creation efficiency without increasing the energy costs of VM deployments. This method dynamically adapts itself and modifies the scheduling strategy to optimize performance amid varying workloads, thus establishing its scalability and robustness. A comparative analysis has demonstrated higher job completion rates against CNN, Bi-GGCN, and HUNDRL, establishing the potential of DRL-based resource allocation. The significant gain in cloud resource utilization and energy-efficient task execution makes HUNHODRL a suitable solution for next-generation cloud computing infrastructure.}, } @article {pmid40312585, year = {2025}, author = {Kathole, AB and Singh, VK and Goyal, A and Kant, S and Savyanavar, AS and Ubale, SA and Jain, P and Islam, MT}, title = {Novel load balancing mechanism for cloud networks using dilated and attention-based federated learning with Coati Optimization.}, journal = {Scientific reports}, volume = {15}, number = {1}, pages = {15268}, pmid = {40312585}, issn = {2045-2322}, support = {DPK-2022-006//Dana Padanan Kolaborasi (DPK)/ ; }, abstract = {Load balancing (LB) is a critical aspect of Cloud Computing (CC), enabling efficient access to virtualized resources over the internet. It ensures optimal resource utilization and smooth system operation by distributing workloads across multiple servers, preventing any server from being overburdened or underutilized. This process enhances system reliability, resource efficiency, and overall performance. As cloud computing expands, effective resource management becomes increasingly important, particularly in distributed environments. This study proposes a novel approach to resource prediction for cloud network load balancing, incorporating federated learning within a blockchain framework for secure and distributed management.
The model leverages Dilated and Attention-based 1-Dimensional Convolutional Neural Networks with bidirectional long short-term memory (DA-DBL) to predict resource needs based on factors such as processing time, reaction time, and resource availability. The integration of the Random Opposition Coati Optimization Algorithm (RO-COA) enables flexible and efficient load distribution in response to real-time network changes. The proposed method is evaluated on various metrics, including active servers, makespan, Quality of Service (QoS), resource utilization, and power consumption, outperforming existing approaches. The results demonstrate that the combination of federated learning and the RO-COA-based load balancing method offers a robust solution for enhancing cloud resource management.}, } @article {pmid40301430, year = {2025}, author = {Zhang, H and Liu, M and Liu, W and Shi, W and Li, S and Zhang, J and Wang, X}, title = {Performance and energy optimization of ternary optical computers based on tandem queuing system.}, journal = {Scientific reports}, volume = {15}, number = {1}, pages = {15037}, pmid = {40301430}, issn = {2045-2322}, abstract = {As an emerging computer technology with numerous bits, bit-wise allocation, and extensive parallelism, the ternary optical computer (TOC) will play an important role in platforms such as cloud computing and big data. Previous studies on TOC in handling computational request tasks have mainly focused on performance enhancement while ignoring the impact of performance enhancement on power consumption. The main objective of this study is to investigate the optimization trade-off between performance and energy consumption in TOC systems. To this end, the service model of the TOC is constructed by introducing the M/M/1 and M/M/c models in queuing theory, combined with the framework of the tandem queueing system, and the optimization problem is studied by adjusting the processor partitioning strategy and the number of small TOC (STOC) in the service process. The results show that the value of increasing active STOCs is prominent when system performance significantly depends on response time. However, marginal gains decrease as the number of STOCs grows, accompanied by rising energy costs. Based on these findings, this paper constructs a bi-objective optimization model using response time and energy consumption. It proposes an optimization strategy to achieve bi-objective optimization of performance and energy consumption for TOC by identifying the optimal partitioning strategy and the number of active small optical processors for different load conditions.}, } @article {pmid40301332, year = {2025}, author = {Zhu, Q and Li, Z and Dong, J and Fu, P and Cheng, Q and Cai, J and Gurgel, H and Yang, L}, title = {Spatiotemporal dataset of dengue influencing factors in Brazil based on geospatial big data cloud computing.}, journal = {Scientific data}, volume = {12}, number = {1}, pages = {712}, pmid = {40301332}, issn = {2052-4463}, mesh = {Brazil/epidemiology ; *Dengue/epidemiology/transmission ; Humans ; *Big Data ; *Cloud Computing ; Spatio-Temporal Analysis ; Risk Factors ; Animals ; }, abstract = {Dengue fever has been spreading rapidly worldwide, with a notably high prevalence in South American countries such as Brazil. Its transmission dynamics are governed by the vector population dynamics and the interactions among humans, vectors, and pathogens, which are further shaped by environmental factors. 
Calculating these environmental indicators is challenging due to the limited spatial coverage of weather station observations and the time-consuming processes involved in downloading and processing local data, such as satellite imagery. This issue is exacerbated in large-scale studies, making it difficult to develop comprehensive and publicly accessible datasets of disease-influencing factors. Addressing this challenge necessitates efficient data integration methods and the assembly of multi-factorial datasets to aid public health authorities in understanding dengue transmission mechanisms and improving risk prediction models. In response, we developed a population-weighted dataset of 12 dengue risk factors, covering 558 microregions in Brazil over 1252 epidemiological weeks from 2001 to 2024. This dataset and the associated methodology streamline data processing for researchers and can be adapted for other vector-borne disease studies.}, } @article {pmid40292999, year = {2025}, author = {Yang, S}, title = {Privacy-Preserving Multi-User Graph Intersection Scheme for Wireless Communications in Cloud-Assisted Internet of Things.}, journal = {Sensors (Basel, Switzerland)}, volume = {25}, number = {6}, pages = {}, pmid = {40292999}, issn = {1424-8220}, abstract = {Cloud-assisted Internet of Things (IoT) has become the core infrastructure of smart society since it solves the computational power, storage, and collaboration bottlenecks of traditional IoT through resource decoupling and capability complementarity. The development of graph databases and cloud-assisted IoT promotes research on privacy-preserving graph computation. In this article, we propose a secure graph intersection scheme that supports multi-user intersection queries in cloud-assisted IoT. The existing work on graph encryption for intersection queries is designed for a single user, which will bring high computational and communication costs for data owners, or cause the risk of leaking the secret key if directly applied to multi-user scenarios. To solve these problems, we employ proxy re-encryption (PRE), which transforms the encrypted graph data with a re-encryption key to enable the graph intersection results to be decrypted by an authorized IoT user using their own private key, while data owners only encrypt their graph data on IoT devices once. In our scheme, different IoT users can query for the intersection of graphs flexibly, while data owners do not need to perform encryption operations every time an IoT user makes a query. Theoretical analysis and simulation results demonstrate that the graph intersection scheme in this paper is secure and practical.}, } @article {pmid40292910, year = {2025}, author = {Ficili, I and Giacobbe, M and Tricomi, G and Puliafito, A}, title = {From Sensors to Data Intelligence: Leveraging IoT, Cloud, and Edge Computing with AI.}, journal = {Sensors (Basel, Switzerland)}, volume = {25}, number = {6}, pages = {}, pmid = {40292910}, issn = {1424-8220}, abstract = {The exponential growth of connected devices and sensor networks has revolutionized data collection and monitoring across industries, from healthcare to smart cities. However, the true value of these systems lies not merely in gathering data but in transforming it into actionable intelligence. The integration of IoT, cloud computing, edge computing, and AI offers a robust pathway to achieve this transformation, enabling real-time decision-making and predictive insights.
This paper explores innovative approaches to combine these technologies, emphasizing their role in enabling real-time decision-making, predictive analytics, and low-latency data processing. This work analyzes several integration approaches among IoT, cloud/edge computing, and AI through examples and applications, highlighting challenges and approaches to seamlessly integrate these techniques to achieve pervasive environmental intelligence. The findings contribute to advancing pervasive environmental intelligence, offering a roadmap for building smarter, more sustainable infrastructure.}, } @article {pmid40292891, year = {2025}, author = {Yang, H and Dong, R and Guo, R and Che, Y and Xie, X and Yang, J and Zhang, J}, title = {Real-Time Acoustic Scene Recognition for Elderly Daily Routines Using Edge-Based Deep Learning.}, journal = {Sensors (Basel, Switzerland)}, volume = {25}, number = {6}, pages = {}, pmid = {40292891}, issn = {1424-8220}, support = {202301BD070001-114//Yunnan Agricultural University/ ; 2024-55 and 2021YLKC126//Yunnan Agricultural University/ ; }, mesh = {Humans ; *Deep Learning ; Aged ; *Acoustics ; Neural Networks, Computer ; Activities of Daily Living ; Wearable Electronic Devices ; }, abstract = {The demand for intelligent monitoring systems tailored to elderly living environments is rapidly increasing worldwide with population aging. Traditional acoustic scene monitoring systems that rely on cloud computing are limited by data transmission delays and privacy concerns. Hence, this study proposes an acoustic scene recognition system that integrates edge computing with deep learning to enable real-time monitoring of elderly individuals' daily activities. The system consists of low-power edge devices equipped with multiple microphones, portable wearable components, and compact power modules, ensuring its seamless integration into the daily lives of the elderly. We developed four deep learning models-convolutional neural network, long short-term memory, bidirectional long short-term memory, and deep neural network-and used model quantization techniques to reduce the computational complexity and memory usage, thereby optimizing them to meet edge device constraints. The CNN model demonstrated superior performance compared to the other models, achieving 98.5% accuracy, an inference time of 2.4 ms, and low memory requirements (25.63 KB allocated for Flash and 5.15 KB for RAM). This architecture provides an efficient, reliable, and user-friendly solution for real-time acoustic scene monitoring in elderly care.}, } @article {pmid40292792, year = {2025}, author = {Vieira, D and Oliveira, M and Arrais, R and Melo, P}, title = {Application of Cloud Simulation Techniques for Robotic Software Validation.}, journal = {Sensors (Basel, Switzerland)}, volume = {25}, number = {6}, pages = {}, pmid = {40292792}, issn = {1424-8220}, support = {00127-IEETA//This work is funded by FCT (Foundation for Science and Technology) under unit 00127-IEETA./ ; 101120406.//This project has received funding from the European Union's Horizon Europe research and innovation programme under the Grant Agreement 101120406./ ; }, abstract = {Continuous Integration and Continuous Deployment are known methodologies for software development that increase the overall quality of the development process. Several robotic software repositories make use of CI/CD tools as an aid to development. However, very few CI pipelines take advantage of using cloud computing to run simulations. 
Here, a CI pipeline is proposed that takes advantage of such features, applied to the development of ATOM, a ROS-based application capable of carrying out the calibration of generalized robotic systems. The proposed pipeline uses GitHub Actions as a CI/CD engine, AWS RoboMaker as a service for running simulations on the cloud, and Rigel as a tool to both containerize ATOM and execute the tests. In addition, a static analysis and unit testing component is implemented with the use of Codacy. The creation of the pipeline was successful, and it was concluded that it constitutes a valuable tool for the development of ATOM and a blueprint for the creation of similar pipelines for other robotic systems.}, } @article {pmid40292726, year = {2025}, author = {Chang, YH and Wu, FC and Lin, HW}, title = {Design and Implementation of ESP32-Based Edge Computing for Object Detection.}, journal = {Sensors (Basel, Switzerland)}, volume = {25}, number = {6}, pages = {}, pmid = {40292726}, issn = {1424-8220}, abstract = {This paper explores the application of the ESP32 microcontroller in edge computing, focusing on the design and implementation of an edge server system to evaluate performance improvements achieved by integrating edge and cloud computing. Responding to the growing need to reduce cloud burdens and latency, this research develops an edge server, detailing the ESP32 hardware architecture, software environment, communication protocols, and server framework. A complementary cloud server software framework is also designed to support edge processing. A deep learning model for object recognition is selected, trained, and deployed on the edge server. Performance evaluation metrics including classification time, MQTT (Message Queuing Telemetry Transport) transmission time, and data from various MQTT brokers are used to assess system performance, with particular attention to the impact of image size adjustments. Experimental results demonstrate that the edge server significantly reduces bandwidth usage and latency, effectively alleviating the load on the cloud server. This study discusses the system's strengths and limitations, interprets experimental findings, and suggests potential improvements and future applications. By integrating AI and IoT, the edge server design and object recognition system demonstrate the benefits of localized edge processing in enhancing efficiency and reducing cloud dependency.}, } @article {pmid40289232, year = {2025}, author = {Alsadie, D and Alsulami, M}, title = {Modified grey wolf optimization for energy-efficient internet of things task scheduling in fog computing.}, journal = {Scientific reports}, volume = {15}, number = {1}, pages = {14730}, pmid = {40289232}, issn = {2045-2322}, abstract = {Fog-cloud computing has emerged as a transformative paradigm for managing the growing demands of Internet of Things (IoT) applications, where efficient task scheduling is crucial for optimizing system performance. However, existing task scheduling methods often struggle to balance makespan minimization and energy efficiency in dynamic and resource-constrained fog-cloud environments. Addressing this gap, this paper introduces a novel Task Scheduling algorithm based on a modified Grey Wolf Optimization approach (TS-GWO), tailored specifically for IoT requests in fog-cloud systems. The proposed TS-GWO incorporates innovative operators to enhance exploration and exploitation capabilities, enabling the identification of optimal scheduling solutions.
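The TS-GWO entry above modifies grey wolf optimization for fog-cloud task scheduling. As a hedged sketch of the underlying idea, the canonical GWO update below searches task-to-VM assignments that minimize makespan; the workload, VM speeds, and parameters are invented placeholders, and the paper's modified operators are not reproduced here.

    import numpy as np

    rng = np.random.default_rng(42)
    tasks = rng.uniform(100, 1000, size=30)        # task lengths (MI), assumed
    vm_speed = np.array([500.0, 750.0, 1000.0])    # VM speeds (MIPS), assumed

    def makespan(position):
        """Decode a continuous wolf position into a task->VM map and score it."""
        assign = np.clip(position, 0, len(vm_speed) - 1e-9).astype(int)
        finish = np.zeros(len(vm_speed))
        for length, vm in zip(tasks, assign):
            finish[vm] += length / vm_speed[vm]
        return finish.max()

    def gwo(n_wolves=20, iters=100):
        dim = len(tasks)
        wolves = rng.uniform(0, len(vm_speed), size=(n_wolves, dim))
        for it in range(iters):
            fitness = np.array([makespan(w) for w in wolves])
            alpha, beta, delta = wolves[np.argsort(fitness)[:3]]
            a = 2 - 2 * it / iters                 # exploration -> exploitation
            for i in range(n_wolves):
                x = np.zeros(dim)
                for leader in (alpha, beta, delta):
                    r1, r2 = rng.random(dim), rng.random(dim)
                    A, C = 2 * a * r1 - a, 2 * r2
                    x += leader - A * np.abs(C * leader - wolves[i])
                wolves[i] = np.clip(x / 3, 0, len(vm_speed))
        return min(makespan(w) for w in wolves)

    print("best makespan (s):", round(gwo(), 3))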
Extensive evaluations using both synthetic and real-world datasets, such as NASA Ames iPSC and HPC2N workloads, demonstrate the superior performance of TS-GWO over established metaheuristic methods. Notably, TS-GWO achieves improvements in makespan by up to 46.15% and reductions in energy consumption by up to 28.57%. These results highlight the potential of TS-GWO to effectively address task scheduling challenges in fog-cloud environments, paving the way for its application in broader optimization tasks.}, } @article {pmid40285084, year = {2025}, author = {Yang, H and Xiong, M and Yao, Y}, title = {MODIS-Based Spatiotemporal Inversion and Driving-Factor Analysis of Cloud-Free Vegetation Cover in Xinjiang from 2000 to 2024.}, journal = {Sensors (Basel, Switzerland)}, volume = {25}, number = {8}, pages = {}, pmid = {40285084}, issn = {1424-8220}, support = {42401534//the National Natural Science Foundation of China/ ; 2024KTSCX052//Research Projects of Department of Education of Guangdong Province/ ; 6023310030K//Shenzhen Polytechnic University Research Fund/ ; 6023240118K//Shenzhen Polytechnic University Research Fund/ ; 6024310045K//Shenzhen Polytechnic University Research Fund/ ; }, mesh = {China ; Ecosystem ; Spatio-Temporal Analysis ; Conservation of Natural Resources ; *Environmental Monitoring/methods ; Remote Sensing Technology ; }, abstract = {The Xinjiang Uygur Autonomous Region, characterized by its complex and fragile ecosystems, has faced ongoing ecological degradation in recent years, challenging national ecological security and sustainable development. To promote the sustainable development of regional ecological and landscape conservation, this study investigates Fractional Vegetation Cover (FVC) dynamics in Xinjiang. Existing studies often lack recent data and exhibit limitations in the selection of driving factors. To mitigate the issues, this study utilized Google Earth Engine (GEE) and cloud-free MOD13A2.061 data to systematically generate comprehensive FVC products for Xinjiang from 2000 to 2024. Additionally, a comprehensive and quantitative analysis of up to 15 potential driving factors was conducted, providing an updated and more robust understanding of vegetation dynamics in the region. This study integrated advanced methodologies, including spatiotemporal statistical analysis, optimized spatial scaling, trend analysis, and Geographical Detector (GeoDetector). Notably, we propose a novel approach combining a Theil-Sen Median trend analysis with a Hurst index to predict future vegetation trends, which to some extent enhances the persuasiveness of the Hurst index alone. The following are the key experimental results: (1) Over the 25-year study period, Xinjiang's vegetation cover exhibited a pronounced north-south gradient, with significantly higher FVC in the northern regions compared to the southern regions. (2) A time series analysis revealed an overall fluctuating upward trend in the FVC, accompanied by increasing volatility and decreasing stability over time. (3) Identification of 15 km as the optimal spatial scale for FVC analysis through spatial statistical analysis using Moran's I and the coefficient of variation. (4) Land use type, vegetation type, and soil type emerged as critical factors, with each contributing over 20% to the explanatory power of FVC variations. 
(5) To elucidate spatial heterogeneity mechanisms, this study conducted ecological subzone-based analyses of vegetation dynamics and drivers.}, } @article {pmid40285052, year = {2025}, author = {Ahoa, E and Kassahun, A and Verdouw, C and Tekinerdogan, B}, title = {Challenges and Solution Directions for the Integration of Smart Information Systems in the Agri-Food Sector.}, journal = {Sensors (Basel, Switzerland)}, volume = {25}, number = {8}, pages = {}, pmid = {40285052}, issn = {1424-8220}, mesh = {*Agriculture/methods ; *Information Systems ; Humans ; Artificial Intelligence ; }, abstract = {Traditional farming has evolved from standalone computing systems to smart farming, driven by advancements in digitalization. This has led to the proliferation of diverse information systems (IS), such as IoT and sensor systems, decision support systems, and farm management information systems (FMISs). These systems often operate in isolation, limiting their overall impact. The integration of IS into connected smart systems is widely addressed as a key driver to tackle these issues. However, it is a complex, multi-faceted issue that is not easily achievable. Previous studies have offered valuable insights, but they often focus on specific cases, such as individual IS and certain integration aspects, lacking a comprehensive overview of various integration dimensions. This systematic review of 74 scientific papers on IS integration addresses this gap by providing an overview of the digital technologies involved, integration levels and types, barriers hindering integration, and available approaches to overcoming these challenges. The findings indicate that integration primarily relies on a point-to-point approach, followed by cloud-based integration. Enterprise service bus, hub-and-spoke, and semantic web approaches are mentioned less frequently but are gaining interest. The study identifies 27 integration challenges and groups them into three main areas: organizational, technological, and data governance-related challenges. Technologies such as blockchain, data spaces, AI, edge computing and microservices, and service-oriented architecture methods are addressed as solutions for data governance and interoperability issues. The insights from the study can help enhance interoperability, leading to data-driven smart farming that increases food production, mitigates climate change, and optimizes resource usage.}, } @article {pmid40277407, year = {2025}, author = {Pietris, J and Bahrami, B and LaHood, B and Goggin, M and Chan, WO}, title = {Cataract Surgery Registries: History, Utility, Barriers and Future.}, journal = {Journal of cataract and refractive surgery}, volume = {}, number = {}, pages = {}, doi = {10.1097/j.jcrs.0000000000001680}, pmid = {40277407}, issn = {1873-4502}, abstract = {Cataract surgery databases have become indispensable tools in ophthalmology, providing extensive data that enhance surgical practices and patient care. This narrative review traces the development of these databases and summarises some of their significant contributions, such as improved surgical outcomes, informed clinical guidelines, and enhanced quality assurance. There are significant barriers to establishing and maintaining cataract surgery databases, including data protection and management challenges, economic constraints, technological hurdles, and ethical considerations.
These obstacles complicate efforts to ensure data accuracy, standardisation, and interoperability across diverse healthcare settings. Large language models and artificial intelligence have potential to streamline data collection and analysis for the future of these databases. Innovations like blockchain for data security and cloud computing for scalability are examined as solutions to current limitations. Addressing the existing challenges and leveraging technological advancements will be crucial for the continued evolution and utility of these databases, ensuring they remain pivotal in advancing cataract surgery and patient care.}, } @article {pmid40271661, year = {2025}, author = {Beyvers, S and Jelonek, L and Goesmann, A and Schwengers, O}, title = {Bakta Web - rapid and standardized genome annotation on scalable infrastructures.}, journal = {Nucleic acids research}, volume = {}, number = {}, pages = {}, doi = {10.1093/nar/gkaf335}, pmid = {40271661}, issn = {1362-4962}, support = {FAIRDS08//Federal Ministry of Education and Research/ ; 031L0288B//Deep-Legion/ ; W-de.NBI-010//German Network for Bioinformatics Infrastructure/ ; 031A533//BiGi Service Center/ ; //Justus Liebig University Giessen/ ; }, abstract = {The Bakta command line application is widely used and one of the most established tools for bacterial genome annotation. It balances comprehensive annotation with computational efficiency via alignment-free sequence identifications. However, the usage of command line software tools and the interpretation of result files in various formats might be challenging and pose technical barriers. Here, we present the recent updates on the Bakta web server, a user-friendly web interface for conducting and visualizing annotations using Bakta without requiring command line expertise or local computing resources. Key features include interactive visualizations through circular genome plots, linear genome browsers, and searchable data tables facilitating the interpretation of complex annotation results. The web server generates standard bioinformatics outputs (GFF3, GenBank, EMBL) and annotates diverse genomic features, including coding sequences, non-coding RNAs, small open reading frames (sORFs), and many more. The development of an auto-scaling cloud-native architecture and improved database integration led to substantially faster processing times and higher throughputs. The system supports FAIR principles via extensive cross-reference links to external databases, including RefSeq, UniRef, and Gene Ontology. Also, novel features have been implemented to foster sharing and collaborative interpretation of results. The web server is freely available at https://bakta.computational.bio.}, } @article {pmid40267359, year = {2025}, author = {Xiao, J and Wu, J and Liu, D and Li, X and Liu, J and Su, X and Wang, Y}, title = {Improved Pine Wood Nematode Disease Diagnosis System Based on Deep Learning.}, journal = {Plant disease}, volume = {109}, number = {4}, pages = {862-874}, doi = {10.1094/PDIS-06-24-1221-RE}, pmid = {40267359}, issn = {0191-2917}, mesh = {*Deep Learning ; Animals ; *Pinus/parasitology ; *Plant Diseases/parasitology ; *Nematoda/isolation & purification/physiology ; *Tylenchida/isolation & purification ; }, abstract = {Pine wilt disease caused by the pine wood nematode, Bursaphelenchus xylophilus, has profound implications for global forestry ecology. Conventional PCR methods need long operating times and are complicated to perform.
The need for rapid and effective detection methodologies to curtail its dissemination and reduce pine felling has become more apparent. This study initially proposed the use of fluorescence recognition for the detection of pine wood nematode disease, accompanied by the development of a dedicated fluorescence detection system based on deep learning. This system possesses the capability to perform excitation, detection, as well as data analysis and transmission of test samples. In exploring fluorescence recognition methodologies, the efficacy of five conventional machine learning algorithms was juxtaposed with that of You Only Look Once version 5 and You Only Look Once version 10, both in the pre- and post-image processing stages. Moreover, enhancements were introduced to the You Only Look Once version 5 model. The network's aptitude for discerning features across varied scales and resolutions was bolstered through the integration of Res2Net. Meanwhile, a SimAM attention mechanism was incorporated into the backbone network, and the original PANet structure was replaced by the Bi-FPN within the Head network to amplify feature fusion capabilities. The enhanced YOLOv5 model demonstrates significant improvements, particularly in the recognition of large-size images, achieving an accuracy improvement of 39.98%. The research presents a novel detection system for pine nematode detection, capable of detecting samples with DNA concentrations as low as 1 fg/μl within 20 min. This system integrates detection instruments, laptops, cloud computing, and smartphones, holding tremendous potential for field application.}, } @article {pmid40265605, year = {2025}, author = {Yin, Y and Liu, B and Zhang, Y and Han, Y and Liu, Q and Feng, J}, title = {Wafer-Scale Nanoprinting of 3D Interconnects beyond Cu.}, journal = {ACS nano}, volume = {}, number = {}, pages = {}, doi = {10.1021/acsnano.5c00720}, pmid = {40265605}, issn = {1936-086X}, abstract = {Cloud operations and services, as well as many other modern computing tasks, require hardware that is run by very densely packed integrated circuits (ICs) and heterogenous ICs. The performance of these ICs is determined by the stability and properties of the interconnects between the semiconductor devices and ICs. Although some ICs with 3D interconnects are commercially available, there has been limited progress on 3D printing utilizing emerging nanomaterials. Moreover, laying out reliable 3D metal interconnects in ICs with the appropriate electrical and physical properties remains challenging. Here, we propose high-throughput 3D interconnection with nanoscale precision by leveraging lines of forces. We successfully nanoprinted multiscale and multilevel Au, Ir, and Ru 3D interconnects on the wafer scale in non-vacuum conditions using a pulsed electric field. The ON phase of the pulsed field initiates in situ printing of nanoparticle (NP) deposition into interconnects, whereas the OFF phase allows the gas flow to evenly distribute the NPs over an entire wafer. Characterization of the 3D interconnects confirms their excellent uniformity, electrical properties, and free-form geometries, far exceeding those of any 3D-printed interconnects. Importantly, their measured resistances approach the theoretical values calculated here. 
The results demonstrate that 3D nanoprinting can be used to fabricate thinner and faster interconnects, which can enhance the performance of dense ICs; therefore, 3D nanoprinting can complement lithography and resolve the challenges encountered in the fabrication of critical device features.}, } @article {pmid40265427, year = {2025}, author = {Pérez-Sanpablo, AI and Quinzaños-Fresnedo, J and Gutiérrez-Martínez, J and Lozano-Rodríguez, IG and Roldan-Valadez, E}, title = {Transforming Medical Imaging: The Role of Artificial Intelligence Integration in PACS for Enhanced Diagnostic Accuracy and Workflow Efficiency.}, journal = {Current medical imaging}, volume = {}, number = {}, pages = {}, doi = {10.2174/0115734056370620250403030638}, pmid = {40265427}, issn = {1573-4056}, abstract = {INTRODUCTION: To examine the integration of artificial intelligence (AI) into Picture Archiving and Communication Systems (PACS) and assess its impact on medical imaging, diagnostic workflows, and patient outcomes. This review explores the technological evolution, key advancements, and challenges associated with AI-enhanced PACS in healthcare settings.

METHODS: A comprehensive literature search was conducted in PubMed, Scopus, and Web of Science databases, covering articles from January 2000 to October 2024. Search terms included "artificial intelligence," "machine learning," "deep learning," and "PACS," combined with keywords related to diagnostic accuracy and workflow optimization. Articles were selected based on predefined inclusion and exclusion criteria, focusing on peer-reviewed studies that discussed AI applications in PACS, innovations in medical imaging, and workflow improvements. A total of 183 studies met the inclusion criteria, comprising original research, systematic reviews, and meta-analyses.
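A search like the one described in this METHODS paragraph can be scripted against the public NCBI E-utilities API. The sketch below is a simplified stand-in: the query string and date window are illustrative, and the review's actual strategy also covered Scopus and Web of Science with fuller inclusion criteria.

    import requests

    ESEARCH = "https://eutils.ncbi.nlm.nih.gov/entrez/eutils/esearch.fcgi"
    params = {
        "db": "pubmed",
        "term": '("artificial intelligence" OR "machine learning" OR "deep learning") AND PACS',
        "datetype": "pdat", "mindate": "2000/01", "maxdate": "2024/10",
        "retmax": 200, "retmode": "json",
    }
    resp = requests.get(ESEARCH, params=params, timeout=30)
    ids = resp.json()["esearchresult"]["idlist"]
    print(f"{len(ids)} candidate PMIDs, e.g. {ids[:5]}")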

RESULTS: AI integration in PACS has significantly enhanced diagnostic accuracy, achieving improvements of up to 93.2% in some imaging modalities, such as early tumor detection and anomaly identification. Workflow efficiency has been transformed, with diagnostic times reduced by up to 90% for critical conditions like intracranial hemorrhages. Convolutional neural networks (CNNs) have demonstrated exceptional performance in image segmentation, achieving up to 94% accuracy, and in motion artifact correction, further enhancing diagnostic precision. Natural language processing (NLP) tools have expedited radiology workflows, reducing reporting times by 30-50% and improving consistency in report generation. Cloud-based solutions have also improved accessibility, enabling real-time collaboration and remote diagnostics. However, challenges in data privacy, regulatory compliance, and interoperability persist, emphasizing the need for standardized frameworks and robust security protocols. CONCLUSIONS: The integration of AI into PACS represents a pivotal transformation in medical imaging, offering improved diagnostic workflows and potential for personalized patient care. Addressing existing challenges and enhancing interoperability will be essential for maximizing the benefits of AI-powered PACS in healthcare.}, } @article {pmid40256223, year = {2025}, author = {Rezaee, K and Nazerian, A and Ghayoumi Zadeh, H and Attar, H and Khosravi, M and Kanan, M}, title = {Smart IoT-driven biosensors for EEG-based driving fatigue detection: A CNN-XGBoost model enhancing healthcare quality.}, journal = {BioImpacts : BI}, volume = {15}, number = {}, pages = {30586}, pmid = {40256223}, issn = {2228-5652}, abstract = {INTRODUCTION: Drowsy driving is a significant contributor to accidents, accounting for 35 to 45% of all crashes. Implementation of an internet of things (IoT) system capable of alerting fatigued drivers has the potential to substantially reduce road fatalities and associated issues. Often referred to as the internet of medical things (IoMT), this system leverages a combination of biosensors, actuators, detectors, cloud-based and edge computing, machine intelligence, and communication networks to deliver reliable performance and enhance quality of life in smart societies.

METHODS: Electroencephalogram (EEG) signals offer potential insights into fatigue detection. However, accurately identifying fatigue from brain signals is challenging due to inter-individual EEG variability and the difficulty of collecting sufficient data during periods of exhaustion. To address these challenges, a novel evolutionary optimization method combining convolutional neural networks (CNNs) and XGBoost, termed CNN-XGBoost Evolutionary Learning, was proposed to improve fatigue identification accuracy. The research explored various subbands of decomposed EEG data and introduced an innovative approach of transforming EEG recordings into RGB scalograms. These scalogram images were processed using a 2D Convolutional Neural Network (2DCNN) to extract essential features, which were subsequently fed into a dense layer for training.
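The scalogram step described above can be sketched with a continuous wavelet transform. Everything below is an assumption for illustration (synthetic signal, 128 Hz sampling, Morlet wavelet, scale range); the study's exact transform settings are not given in the abstract.

    import numpy as np
    import pywt
    import matplotlib.pyplot as plt

    fs = 128                                   # sampling rate in Hz (assumed)
    t = np.arange(0, 4, 1 / fs)
    eeg = np.sin(2 * np.pi * 10 * t) + 0.5 * np.random.randn(t.size)  # stand-in epoch

    # Continuous wavelet transform: rows are scales, columns are time points.
    scales = np.arange(1, 64)
    coeffs, freqs = pywt.cwt(eeg, scales, "morl", sampling_period=1 / fs)

    # Save the coefficient magnitudes as an RGB image for a 2D CNN input.
    plt.imsave("scalogram.png", np.abs(coeffs), cmap="jet")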

RESULTS: The resulting model achieved a noteworthy accuracy of 99.80% on a substantial driver fatigue dataset, surpassing existing methods.

CONCLUSION: By integrating this approach into an IoT framework, researchers effectively addressed previous challenges and established an artificial intelligence of things (AIoT) infrastructure for critical driving conditions. This IoT-based system optimizes data processing, reduces computational complexity, and enhances overall system performance, enabling accurate and timely detection of fatigue in extreme driving environments.}, } @article {pmid40253394, year = {2025}, author = {Alzakari, SA and Alamgeer, M and Alashjaee, AM and Abdullah, M and Abdul Sattar, KN and Alshuhail, A and Alzahrani, AA and Alkharashi, A}, title = {Heuristically enhanced multi-head attention based recurrent neural network for denial of wallet attacks detection on serverless computing environment.}, journal = {Scientific reports}, volume = {15}, number = {1}, pages = {13538}, pmid = {40253394}, issn = {2045-2322}, mesh = {*Neural Networks, Computer ; *Computer Security ; *Cloud Computing ; Algorithms ; Heuristics ; Recurrent Neural Networks ; }, abstract = {Denial of Wallet (DoW) attacks are a cyber threat designed to deplete an organization's financial resources by generating excessive charges on its cloud computing (CC) and serverless computing platforms. These threats are particularly relevant to serverless deployments because of features such as auto-scaling, pay-as-you-go billing, restricted control, and cost growth. Serverless computing, frequently referred to as Function-as-a-Service (FaaS), is a CC model that permits developers to build and run applications without having to manage traditional server infrastructure. Detecting DoW threats involves monitoring and analyzing the system-level resource consumption of specific bare-metal machines. Efficient and precise detection of internal DoW threats remains a crucial challenge. Timely recognition is significant in preventing potential damage, as DoW attacks exploit the financial model of serverless environments, impacting the cost structure and operational integrity of services. In this study, a Multi-Head Attention-based Recurrent Neural Network for Denial of Wallet Attacks Detection (MHARNN-DoWAD) technique is developed. The MHARNN-DoWAD method enables the detection of DoW attacks in serverless computing environments. At first, the presented MHARNN-DoWAD model performs data preprocessing by using min-max normalization to convert input data into a consistent format. Next, the wolf pack predation (WPP) method is employed for feature selection. For the detection and classification of DoW attacks, the multi-head attention-based bi-directional gated recurrent unit (MHA-BiGRU) model is utilized. Eventually, an improved secretary bird optimizer algorithm (ISBOA)-based hyperparameter selection process is performed to optimize the detection results of the MHA-BiGRU model. A comprehensive set of simulations was conducted to demonstrate the promising results of the MHARNN-DoWAD method.
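The min-max preprocessing named in the pipeline above is standard feature scaling into [0, 1]; a minimal column-wise sketch (the guard for constant features is an implementation assumption):

    import numpy as np

    def min_max_normalize(X: np.ndarray) -> np.ndarray:
        """Scale each feature column of X into the [0, 1] range."""
        x_min, x_max = X.min(axis=0), X.max(axis=0)
        span = np.where(x_max > x_min, x_max - x_min, 1.0)  # avoid divide-by-zero
        return (X - x_min) / span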
The experimental validation of the MHARNN-DoWAD technique portrayed a superior accuracy value of 98.30% over existing models.}, } @article {pmid40249680, year = {2025}, author = {Brito, CV and Ferreira, PG and Paulo, JT}, title = {Exploiting Trusted Execution Environments and Distributed Computation for Genomic Association Tests.}, journal = {IEEE journal of biomedical and health informatics}, volume = {PP}, number = {}, pages = {}, doi = {10.1109/JBHI.2025.3562364}, pmid = {40249680}, issn = {2168-2208}, abstract = {Breakthroughs in sequencing technologies led to an exponential growth of genomic data, providing novel biological insights and therapeutic applications. However, analyzing large amounts of sensitive data raises key data privacy concerns, specifically when the information is outsourced to untrusted third-party infrastructures for data storage and processing (e.g., cloud computing). We introduce Gyosa, a secure and privacy-preserving distributed genomic analysis solution. By leveraging trusted execution environments (TEEs), Gyosa allows users to confidentially delegate their GWAS analysis to untrusted infrastructures. Gyosa implements a computation partitioning scheme that reduces the computation done inside the TEEs while safeguarding the users' genomic data privacy. By integrating this security scheme in Glow, Gyosa provides a secure and distributed environment that facilitates diverse GWAS studies. The experimental evaluation validates the applicability and scalability of Gyosa, reinforcing its ability to provide enhanced security guarantees.}, } @article {pmid40244301, year = {2025}, author = {Kocak, B and Ponsiglione, A and Romeo, V and Ugga, L and Huisman, M and Cuocolo, R}, title = {Radiology AI and sustainability paradox: environmental, economic, and social dimensions.}, journal = {Insights into imaging}, volume = {16}, number = {1}, pages = {88}, pmid = {40244301}, issn = {1869-4101}, abstract = {Artificial intelligence (AI) is transforming radiology by improving diagnostic accuracy, streamlining workflows, and enhancing operational efficiency. However, these advancements come with significant sustainability challenges across environmental, economic, and social dimensions. AI systems, particularly deep learning models, require substantial computational resources, leading to high energy consumption, increased carbon emissions, and hardware waste. Data storage and cloud computing further exacerbate the environmental impact. Economically, the high costs of implementing AI tools often outweigh the demonstrated clinical benefits, raising concerns about their long-term viability and equity in healthcare systems. Socially, AI risks perpetuating healthcare disparities through biases in algorithms and unequal access to technology. On the other hand, AI has the potential to improve sustainability in healthcare by reducing low-value imaging, optimizing resource allocation, and improving energy efficiency in radiology departments. This review addresses the sustainability paradox of AI from a radiological perspective, exploring its environmental footprint, economic feasibility, and social implications. Strategies to mitigate these challenges are also discussed, alongside a call for action and directions for future research. CRITICAL RELEVANCE STATEMENT: By adopting an informed and holistic approach, the radiology community can ensure that AI's benefits are realized responsibly, balancing innovation with sustainability. 
This effort is essential to align technological advancements with environmental preservation, economic sustainability, and social equity. KEY POINTS: AI has an ambivalent potential, capable of both exacerbating global sustainability issues and offering increased productivity and accessibility. Addressing AI sustainability requires a broad perspective accounting for environmental impact, economic feasibility, and social implications. By embracing the duality of AI, the radiology community can adopt informed strategies at individual, institutional, and collective levels to maximize its benefits while minimizing negative impacts.}, } @article {pmid40237923, year = {2025}, author = {Ansari, N and Kumari, P and Kumar, R and Kumar, P and Shamshad, A and Hossain, S and Sharma, A and Singh, Y and Kumari, M and Mishra, VN and Rukhsana, and Javed, A}, title = {Seasonal patterns of air pollution in Delhi: interplay between meteorological conditions and emission sources.}, journal = {Environmental geochemistry and health}, volume = {47}, number = {5}, pages = {175}, pmid = {40237923}, issn = {1573-2983}, mesh = {*Seasons ; India ; *Air Pollutants/analysis ; *Air Pollution/analysis/statistics & numerical data ; *Environmental Monitoring ; Carbon Monoxide/analysis ; Sulfur Dioxide/analysis ; Nitrogen Dioxide/analysis ; Particulate Matter/analysis ; Ozone/analysis ; Cities ; Meteorological Concepts ; }, abstract = {Air pollution (AP) poses a significant public health risk, particularly in developing countries, where it contributes to a growing prevalence of health issues. This study investigates seasonal variations in key air pollutants, including particulate matter, nitrogen dioxide (NO2), sulfur dioxide (SO2), carbon monoxide (CO), and ozone (O3), in New Delhi during 2024. Utilizing Sentinel-5 satellite data processed through the Google earth engine (GEE), a cloud-based geospatial analysis platform, the study evaluates pollutant dynamics during pre-monsoon and post-monsoon seasons. The methodology involved programming in JavaScript to extract pollution parameters, applying cloud filters to eliminate contaminated data, and generating average pollution maps at monthly, seasonal, and annual intervals. The results revealed distinct seasonal pollution patterns. Pre-monsoon root mean square error (RMSE) values for CO, NO2, SO2, and O3 were 0.13, 2.58, 4.62, and 2.36, respectively, while post-monsoon values were 0.17, 2.41, 4.31, and 4.60. Winter months exhibited the highest pollution levels due to increased emissions from biomass burning, vehicular activity, and industrial operations, coupled with atmospheric inversions. Conversely, monsoon months saw a substantial reduction in pollutant levels due to wet deposition and improved dispersion driven by stronger winds. Additionally, post-monsoon crop residue burning emerged as a major episodic pollution source. 
This study underscores the utility of Sentinel-5 products in monitoring urban air pollution and provides valuable insights for policymakers to develop targeted mitigation strategies, particularly for urban megacities like Delhi, where seasonal and source-specific interventions are crucial for reducing air pollution and its associated health risks.}, } @article {pmid40230982, year = {2024}, author = {Zao, JK and Wu, JT and Kanyimbo, K and Delizy, F and Gan, TT and Kuo, HI and Hsia, CH and Lo, CH and Yang, SH and Richard, CJA and Rajab, B and Monawe, M and Kamanga, B and Mtambalika, N and Yu, KJ and Chou, CF and Neoh, CA and Gallagher, J and O'Donoghue, J and Mtegha, R and Lee, HY and Mbewe, A}, title = {Design of a Trustworthy Cloud-Native National Digital Health Information Infrastructure for Secure Data Management and Use.}, journal = {Oxford open digital health}, volume = {2}, number = {}, pages = {oqae043}, pmid = {40230982}, issn = {2754-4591}, abstract = {Since 2022, Malawi Ministry of Health (MoH) designated the development of a National Digital Health Information System (NDHIS) as one of the most important pillars of its national health strategy. This system is built upon a distributed computing infrastructure employing the following state-of-art technologies: (i) digital healthcare devices to capture medical data; (ii) Kubernetes-based Cloud-Native Computing architecture to simplify system management and service deployment; (iii) Zero-Trust Secure Communication to protect confidentiality, integrity and access rights of medical data transported over the Internet; (iv) Trusted Computing to allow medical data to be processed by certified software without compromising data privacy and sovereignty. Trustworthiness, including reliability, security, privacy and business integrity, of this system was ensured by a peer-to-peer network of trusted medical information guards deployed as the gatekeepers of the computing facility on this system. This NDHIS can facilitate Malawi to attain universal health coverage by 2030 through its scalability and operation efficiency. It shall improve medical data quality and security by adopting a paperless approach. It will also enable MoH to offer data rental services to healthcare researchers and AI model developers around the world. This project is spearheaded by the Digital Health Division (DHD) under MoH. The trustworthy computing infrastructure was designed by a taskforce assembled by the DHD in collaboration with Luke International in Norway, and a consortium of hardware and software solution providers in Taiwan. A prototype that can connect community clinics with a district hospital has been tested at Taiwan Pingtung Christian Hospital.}, } @article {pmid40221359, year = {2025}, author = {Dessevres, E and Valderrama, M and Le Van Quyen, M}, title = {Artificial intelligence for the detection of interictal epileptiform discharges in EEG signals.}, journal = {Revue neurologique}, volume = {}, number = {}, pages = {}, doi = {10.1016/j.neurol.2025.04.001}, pmid = {40221359}, issn = {0035-3787}, abstract = {INTRODUCTION: Over the past decades, the integration of modern technologies - such as electronic health records, cloud computing, and artificial intelligence (AI) - has revolutionized the collection, storage, and analysis of medical data in neurology. In epilepsy, Interictal Epileptiform Discharges (IEDs) are the most established biomarker, indicating an increased likelihood of seizures. 
Their detection traditionally relies on visual EEG assessment, a time-consuming and subjective process contributing to a high misdiagnosis rate. These limitations have spurred the development of automated AI-driven approaches aimed at improving accuracy and efficiency in IED detection.

METHODS: Research on automated IED detection began 45 years ago, spanning from morphological methods to deep learning techniques. In this review, we examine various IED detection approaches, evaluating their performance and limitations.
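As a toy illustration of the early morphological detectors mentioned above: flag samples whose slope deviates sharply from baseline, then merge nearby detections into events. The z-score criterion and thresholds are invented for the sketch; modern pipelines replace this stage with trained networks.

    import numpy as np

    def detect_ied_candidates(eeg, fs, z_thresh=4.0, min_gap_s=0.2):
        """Return rough event times (s) where the signal slope is anomalously steep."""
        slope = np.diff(eeg) * fs                    # first derivative
        z = (slope - slope.mean()) / slope.std()
        idx = np.flatnonzero(np.abs(z) > z_thresh)
        events, last = [], -np.inf
        for i in idx:
            if (i - last) / fs > min_gap_s:          # start a new event
                events.append(i / fs)
            last = i
        return events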

RESULTS: Traditional machine learning and deep learning methods have produced the most promising results to date, and their application in IED detection continues to grow. Today, AI-driven tools are increasingly integrated into clinical workflows, assisting clinicians in identifying abnormalities while reducing false-positive rates.

DISCUSSION: To optimize the clinical implementation of automated AI-based IED detection, it is essential to make the code publicly available and to standardize the datasets and metrics. Establishing uniform benchmarks will enable objective model comparisons and help determine which approaches are best suited for clinical use.}, } @article {pmid40218550, year = {2025}, author = {Almuseelem, W}, title = {Deep Reinforcement Learning-Enabled Computation Offloading: A Novel Framework to Energy Optimization and Security-Aware in Vehicular Edge-Cloud Computing Networks.}, journal = {Sensors (Basel, Switzerland)}, volume = {25}, number = {7}, pages = {}, pmid = {40218550}, issn = {1424-8220}, abstract = {The Vehicular Edge-Cloud Computing (VECC) paradigm has gained traction as a promising solution to mitigate computational constraints through offloading resource-intensive tasks to distributed edge and cloud networks. However, conventional computation offloading mechanisms frequently induce network congestion and service delays, stemming from uneven workload distribution across spatial Roadside Units (RSUs).
Moreover, ensuring data security and optimizing energy usage within this framework remain significant challenges. To this end, this study introduces a deep reinforcement learning-enabled computation offloading framework for multi-tier VECC networks. First, a dynamic load-balancing algorithm is developed to optimize the balance among RSUs, incorporating real-time analysis of heterogeneous network parameters, including RSU computational load, channel capacity, and proximity-based latency. Additionally, to alleviate congestion in static RSU deployments, the framework proposes deploying UAVs in high-density zones, dynamically augmenting both storage and processing resources. Moreover, an Advanced Encryption Standard (AES)-based mechanism, secured with dynamic one-time encryption key generation, is implemented to fortify data confidentiality during transmissions. Further, a context-aware edge caching strategy is implemented to preemptively store processed tasks, reducing redundant computations and associated energy overheads. Subsequently, a mixed-integer optimization model is formulated that simultaneously minimizes energy consumption and guarantees latency constraints. Given the combinatorial complexity of large-scale vehicular networks, an equivalent reinforcement learning formulation is derived. Then a deep learning-based algorithm is designed to learn near-optimal offloading solutions under dynamic conditions. Empirical evaluations demonstrate that the proposed framework significantly outperforms existing benchmark techniques in terms of energy savings. These results underscore the framework's efficacy in advancing sustainable, secure, and scalable intelligent transportation systems.}, } @article {pmid40211053, year = {2025}, author = {Khan, A and Ullah, F and Shah, D and Khan, MH and Ali, S and Tahir, M}, title = {EcoTaskSched: a hybrid machine learning approach for energy-efficient task scheduling in IoT-based fog-cloud environments.}, journal = {Scientific reports}, volume = {15}, number = {1}, pages = {12296}, pmid = {40211053}, issn = {2045-2322}, abstract = {The widespread adoption of cloud services has posed several challenges, primarily revolving around energy and resource efficiency. Integrating cloud and fog resources can help address these challenges by improving fog-cloud computing environments. Nevertheless, the search for optimal task allocation and energy management in such environments continues. Existing studies have introduced notable solutions; however, it is still a challenging issue to efficiently utilize these heterogeneous cloud resources and achieve energy-efficient task scheduling in fog-cloud of things environments. To tackle these challenges, we propose a novel ML-based EcoTaskSched model, which leverages deep learning for energy-efficient task scheduling in fog-cloud networks. The proposed hybrid model integrates Convolutional Neural Networks (CNNs) with Bidirectional Long Short-Term Memory (BiLSTM) to enhance energy-efficient schedulability and reduce energy usage while ensuring QoS provisioning. The CNN model efficiently extracts workload features from tasks and resources, while the BiLSTM captures complex sequential information, predicting optimal task placement sequences. A real fog-cloud environment is implemented using the COSCO framework for the simulation setup together with four physical nodes from the Azure B2s plan to test the proposed model.
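The CNN-plus-BiLSTM pattern EcoTaskSched builds on can be sketched in a few Keras lines; the input shape (20 timesteps of 8 workload features), layer sizes, and the single placement score output are placeholders, not the paper's architecture.

    import tensorflow as tf
    from tensorflow.keras import layers

    model = tf.keras.Sequential([
        tf.keras.Input(shape=(20, 8)),              # sequence of workload features
        layers.Conv1D(32, 3, padding="same", activation="relu"),  # local patterns
        layers.MaxPooling1D(2),
        layers.Bidirectional(layers.LSTM(64)),      # sequential context, both directions
        layers.Dense(32, activation="relu"),
        layers.Dense(1, activation="sigmoid"),      # e.g. placement score per candidate node
    ])
    model.compile(optimizer="adam", loss="binary_crossentropy")
    model.summary()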
The DeFog benchmark is used to develop task workloads, and data collection was conducted for both normal and intense workload scenarios. During preprocessing, the data were normalized, enhanced through feature engineering and augmentation, and then split into training and test sets. In the performance evaluation, the proposed EcoTaskSched model demonstrated superiority by significantly reducing energy consumption and improving job completion rates compared to baseline models. Additionally, the EcoTaskSched model maintained a high job completion rate of 85%, outperforming GGCN and BiGGCN. It also achieved lower average response times and SLA violation rates, as well as increased throughput and reduced execution cost, compared to other baseline models. In its optimal configuration, the EcoTaskSched model is successfully applied to fog-cloud computing environments, increasing task handling efficiency and reducing energy consumption while maintaining the required QoS parameters. Our future studies will focus on long-term testing of the EcoTaskSched model in real-world IoT environments. We will also assess its applicability by integrating other ML models, which could provide enhanced insights for optimizing scheduling algorithms across diverse fog-cloud settings.}, } @article {pmid40210938, year = {2025}, author = {Wang, Y and Kong, D and Chai, H and Qiu, H and Xue, R and Li, S}, title = {D2D assisted cooperative computational offloading strategy in edge cloud computing networks.}, journal = {Scientific reports}, volume = {15}, number = {1}, pages = {12303}, pmid = {40210938}, issn = {2045-2322}, support = {242102210050//Science and Technology Project of Henan Province/ ; 252102210114//Science and Technology Project of Henan Province/ ; 202410467035//National College Students' Innovative Training Program/ ; }, abstract = {In the computational offloading problem of edge cloud computing (ECC), almost all research develops offloading strategies by optimizing user cost, but most studies consider only delay and energy consumption and seldom consider task waiting delay. This is very unfavorable for tasks with highly sensitive latency requirements in the current era of intelligence. In this paper, by using D2D (Device-to-Device) technology, we propose a D2D-assisted collaborative computational offloading strategy (D-CCO) based on user cost optimization to obtain the offloading decision and the number of tasks that can be offloaded. Specifically, we first build a task queue system with multiple local devices, peer devices and edge processors, and compare the execution performance of computing tasks on different devices, taking into account user costs such as task delay, power consumption, and wait delay. Then, the stochastic optimization algorithm and the back-pressure algorithm are used to develop the offloading strategy, which ensures the stability of the system and reduces the computing cost to the greatest extent, so as to obtain the optimal offloading decision. In addition, the stability of the proposed algorithm is analyzed theoretically, that is, the upper bounds of all queues in the system are derived. The simulation results show the stability of the proposed algorithm, and demonstrate that the D-CCO algorithm is superior to other alternatives.
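The D-CCO entry above weighs task delay, power consumption, and waiting delay when deciding where a task runs. A toy decision rule with a linear cost model makes the trade-off concrete; the weights, site parameters, and cost form are assumptions, not the paper's stochastic-optimization formulation.

    def offload_decision(task_cycles, data_bits, wait_s, sites):
        """Pick the site (local, D2D peer, or edge) with the lowest weighted cost."""
        w_delay, w_energy, w_wait = 0.5, 0.3, 0.2           # illustrative weights
        best, best_cost = None, float("inf")
        for name, (cpu_hz, rate_bps, power_w) in sites.items():
            tx = 0.0 if name == "local" else data_bits / rate_bps
            delay = tx + task_cycles / cpu_hz
            cost = w_delay * delay + w_energy * power_w * delay + w_wait * wait_s[name]
            if cost < best_cost:
                best, best_cost = name, cost
        return best, best_cost

    sites = {"local": (1e9, 1.0, 0.8), "d2d": (2e9, 5e6, 0.3), "edge": (8e9, 2e6, 0.1)}
    waits = {"local": 0.0, "d2d": 0.05, "edge": 0.20}
    print(offload_decision(task_cycles=5e8, data_bits=2e6, wait_s=waits, sites=sites))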
Compared with other algorithms, this algorithm can effectively reduce the user cost.}, } @article {pmid40204841, year = {2025}, author = {Zhong, A and Wang, Z and Gen, Y}, title = {Research on water body information extraction and monitoring in high water table mining areas based on Google Earth Engine.}, journal = {Scientific reports}, volume = {15}, number = {1}, pages = {12133}, pmid = {40204841}, issn = {2045-2322}, support = {2241STC60470//Beijing Business Environment Reform and Support Program in the field of ecology and environment/ ; }, abstract = {The extensive and intensive exploitation of coal resources has led to a particularly prominent issue of water accumulation in high groundwater table mining areas, significantly impacting the surrounding ecological environment and directly threatening the red line of cultivated land and regional food security. To provide a scientific basis for the ecological restoration of water accumulation areas in coal mining subsidence, a study on the extraction of water body information in high groundwater level subsidence areas is conducted. The spectral characteristics of land types within mining subsidence areas were analyzed through the application of the Google Earth Engine (GEE) big data cloud platform and Landsat series imagery. This study addressed technical bottlenecks in applying traditional water indices in mining areas, such as spectral interference from coal slag, under-detection of small water bodies, and misclassification of agricultural fields. An Improved Normalized Difference Water Index (INDWI) was proposed based on the analysis of spectral characteristics of surface objects, in conjunction with the OTSU algorithm. The effectiveness of water body extraction using INDWI was compared with that of Normalized Difference Water Index (NDWI), Enhanced Water Index (EWI), and Modified Normalized Difference Water Index (MNDWI). The results indicated that: (1) The INDWI demonstrated the highest overall accuracy, surpassing 89%, and a Kappa coefficient exceeding 80%. The extraction of water body information in mining areas was significantly superior to that achieved by the other three prevalent water indices. (2) The extraction results of the MNDWI and INDWI water Index generally aligned with the actual conditions. The boundaries of water bodies extracted using MNDWI in mining subsidence areas were somewhat ambiguous, leading to the misidentification of small water accumulation pits and misclassification of certain agricultural fields. In contrast, the extraction results of INDWI exhibited better alignment with the imagery, with no significant identification errors observed. (3) Through the comparison of three typical areas, it was concluded that the clarity of the water body boundary lines extracted by INDWI was higher, with relatively fewer internal noise points, and the soil ridges and bridges within the water bodies were distinctly visible, aligning with the actual situation. 
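For the water-index comparison above: the standard MNDWI plus OTSU thresholding takes only a few lines, which is the baseline INDWI improves on (INDWI's exact band combination is not given in the abstract, so only MNDWI is sketched).

    import numpy as np
    from skimage.filters import threshold_otsu

    def mndwi_water_mask(green: np.ndarray, swir: np.ndarray) -> np.ndarray:
        """MNDWI = (Green - SWIR) / (Green + SWIR), binarized with OTSU."""
        index = (green - swir) / np.maximum(green + swir, 1e-6)  # avoid /0
        return index > threshold_otsu(index)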
The research findings offer a foundation for the formulation of land reclamation and ecological restoration plans in coal mining subsidence areas.}, } @article {pmid40200462, year = {2025}, author = {Haddad, T and Kumarapeli, P and de Lusignan, S and Khaddaj, S and Barman, S}, title = {Software Quality Injection (QI): A Quality Driven Holistic Approach for Optimising Big Healthcare Data Processing.}, journal = {Studies in health technology and informatics}, volume = {323}, number = {}, pages = {141-145}, doi = {10.3233/SHTI250065}, pmid = {40200462}, issn = {1879-8365}, mesh = {*Big Data ; *Software/standards ; Humans ; *Data Accuracy ; *Cloud Computing/standards ; *Electronic Health Records/organization & administration ; }, abstract = {The rapid growth of big data is driving innovation in software development, with advanced analytics offering transformative opportunities in applied computing. Big Healthcare Data (BHD), characterised by multi-structured and complex data types, requires resilient and scalable architectures to effectively address critical data quality issues. This paper proposes a holistic framework for adopting advanced cloud-computing strategies to manage and optimise the unique characteristics of BHD processing. It outlines a comprehensive approach for ensuring optimal data handling for critical healthcare workflows by enhancing the system's quality attributes. The proposed framework prioritises and dynamically adjusts software functionalities in real-time, harnessing sophisticated orchestration capabilities to manage complex, multi-dimensional healthcare datasets, streamline operations, and bolster system resilience.}, } @article {pmid40200372, year = {2025}, author = {Landais, P and Gueguen, S and Clement, A and Amselem, S and , }, title = {The RaDiCo information system for rare disease cohorts.}, journal = {Orphanet journal of rare diseases}, volume = {20}, number = {1}, pages = {166}, pmid = {40200372}, issn = {1750-1172}, support = {ANR-10-COHO-0003//Agence Nationale de la Recherche/ ; }, mesh = {*Rare Diseases ; Humans ; *Information Systems ; Databases, Factual ; Cohort Studies ; }, abstract = {BACKGROUND: Rare diseases (RDs) clinical care and research face several challenges. Patients are dispersed over large geographic areas, their number per disease is limited, just like the number of researchers involved. Current databases as well as biological collections, when existing, are generally local, of modest size, incomplete, of uneven quality, heterogeneous in format and content, and rarely accessible or standardised to support interoperability. Most disease phenotypes are complex corresponding to multi-systemic conditions, with insufficient interdisciplinary cooperation. Thus emerged the need to generate, within a coordinated, mutualised, secure and interoperable framework, high-quality data from national or international RD cohorts, based on deep phenotyping, including molecular analysis data, notably genotypic. The RaDiCo program objective was to create, under the umbrella of Inserm, a national operational platform dedicated to the development of RD e-cohorts. Its Information System (IS) is presented here.

MATERIAL AND METHODS: Constructed on cloud-computing principles, the RaDiCo platform was designed to promote mutualisation and factorisation of processes and services, for both clinical epidemiology support and IS. RaDiCo IS is based on an interoperability framework combining a unique RD identifier, data standardisation, FAIR principles, data exchange flows/processes and data security principles compliant with the European GDPR.
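One common way to realise the 'unique RD identifier' with the GDPR-aligned security this paragraph describes is a keyed, non-reversible pseudonym. The HMAC-SHA256 sketch below is an illustration only; RaDiCo's actual identifier scheme is not specified in the abstract.

    import hmac, hashlib

    def pseudonymous_rd_id(patient_ref: str, secret_key: bytes) -> str:
        """Derive a stable, non-reversible cohort identifier from a patient reference."""
        return hmac.new(secret_key, patient_ref.encode(), hashlib.sha256).hexdigest()

    print(pseudonymous_rd_id("site42-patient-0815", b"cohort-master-key"))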

RESULTS: RaDiCo IS favours a secure, open-source web application to implement and manage online databases and to give patients themselves the opportunity to collect their data. It ensures continuous monitoring of data quality and consistency over time. RaDiCo IS has proved efficient, currently hosting 13 e-cohorts covering 67 distinct RDs. As of April 2024, 8063 patients had been recruited from 180 specialised RD sites spread across the national territory.

DISCUSSION: The RaDiCo operational platform is equivalent to a national infrastructure. Its IS enables RD e-cohorts to be developed on a shared platform with no limit on size or number. Compliant with the GDPR, it is compatible with the French National Health Data Hub and can be extended to the RDs European Reference Networks (ERNs).

CONCLUSION: RaDiCo provides a robust IS, compatible with the French Data Hub and RDs ERNs, integrated on an RD platform that enables e-cohort creation, monitoring and analysis.}, } @article {pmid40200080, year = {2025}, author = {Lilhore, UK and Simaiya, S and Prajapati, YN and Rai, AK and Ghith, ES and Tlija, M and Lamoudan, T and Abdelhamid, AA}, title = {A multi-objective approach to load balancing in cloud environments integrating ACO and WWO techniques.}, journal = {Scientific reports}, volume = {15}, number = {1}, pages = {12036}, pmid = {40200080}, issn = {2045-2322}, abstract = {Effective load balancing and resource allocation are essential in dynamic cloud computing environments, where the demand for rapidity and continuous service is perpetually increasing. This paper introduces an innovative hybrid optimisation method that combines water wave optimization (WWO) and ant colony optimization (ACO) to tackle these challenges effectively. ACO is acknowledged for its proficiency in local search, facilitating the swift discovery of high-quality solutions. In contrast, WWO specialises in global exploration, guaranteeing extensive coverage of the solution space. Collectively, these methods harness their distinct advantages to enhance various objectives: decreasing response times, maximising resource efficiency, and lowering operational expenses. We assessed the efficacy of our hybrid methodology by conducting extensive simulations using the CloudSim simulator and a variety of workload trace files. We compared our method against well-established algorithms, such as WWO, genetic algorithm (GA), spider monkey optimization (SMO), and ACO. Key performance indicators, such as task scheduling duration, execution costs, energy consumption, and resource utilisation, were meticulously assessed. The findings demonstrate that the hybrid WWO-ACO approach enhances task scheduling efficiency by 11%, decreases operational expenses by 8%, and lowers energy usage by 12% relative to conventional methods. In addition, the algorithm consistently achieved an impressive equilibrium in resource allocation, with balance values ranging from 0.87 to 0.95. The results emphasise the hybrid WWO-ACO algorithm's substantial impact on improving system performance and customer satisfaction, thereby demonstrating a significant improvement in cloud computing optimisation techniques.}, } @article {pmid40196761, year = {2025}, author = {Sebin, D and Doda, V and Balamani, S}, title = {Schema: A Quantified Learning Solution to Augment, Assess, and Analyze Learning in Medicine.}, journal = {Cureus}, volume = {17}, number = {4}, pages = {e81803}, pmid = {40196761}, issn = {2168-8184}, abstract = {Quantified learning is the use of digital technologies, such as mobile applications, cloud-based analytics, machine learning algorithms, and real-time performance tracking systems, to deliver more granular, personalized, and measurable educational experiences and outcomes. These principles, along with horizontal and vertical integrative learning, form the basis of modern learning methods. As we witness a global shift from traditional learning to competency-based education, educators agree that there is a need to promote quantified learning. The increased accessibility of technology in educational institutions has allowed unprecedented innovation in learning. The convergence of mobile computing, cloud computing, and Web 2.0 tools has made such models more practical.
Despite this, little has been achieved in medical education, where quantified learning and technology aids are limited to a few institutions and used mainly in simulated classroom environments. This innovation report describes the development, dynamics, and scope of Schema, an app-based e-learning solution designed for undergraduate medical students to promote quantified, integrative, high-yield, and self-directed learning along with feedback-based self-assessment and progress monitoring. Schema is linked to a database of preclinical, paraclinical, and clinical multiple choice questions (MCQs) that it organizes into granular subtopics independent of the core subject. It also monitors the progress and performance of the learner as they solve these MCQs and converts that information into quantifiable visual feedback for the learners, which is used to target, improve, revise, and assess their competency. This is important considering that the new generation of medical students is open to adopting technology, novel study techniques, and resources outside the traditional learning environment of a medical school. Schema was made available to medical students as part of an e-learning platform in 2022 to aid their learning. In addition, we aim to use Schema and the range of possibilities it offers to gain deeper insights into the way we learn medicine.}, } @article {pmid40195353, year = {2025}, author = {Xu, Z and Zhou, W and Han, H and Dong, X and Zhang, S and Hu, Z}, title = {A secure and scalable IoT access control framework with dynamic attribute updates and policy hiding.}, journal = {Scientific reports}, volume = {15}, number = {1}, pages = {11913}, pmid = {40195353}, issn = {2045-2322}, support = {2022BAA040//The Key-Area Research and Development Program of Hubei Province/ ; 2020B1111420002//The Key-Area Research and Development Program of Guangdong Province/ ; 2022-11-4-3//The Science and Technology Project of Department of Transport of Hubei Province/ ; BSQD2019027//The Innovation Fund of Hubei University of Technology/ ; }, abstract = {With the rapid rise of Internet of Things (IoT) technology, cloud computing and attribute-based encryption (ABE) are often employed to safeguard the privacy and security of IoT data. However, most blockchain-based access control methods are one-way, and user access policies are public, which cannot simultaneously meet the needs of dynamic attribute updates, two-way verification of users and data, and secure data transmission. To handle such challenges, we propose an attribute-based encryption scheme that satisfies real-time and secure sharing requirements through attribute updates and policy hiding. First, we designed a new dynamic update and policy hiding bidirectional attribute access control (DUPH-BAAC) scheme. In addition, a policy-hiding technique was adopted. The data owner sends encrypted addresses with hidden access policies to the blockchain network for verification through transactions. Then, the user locally matches attributes, and the smart contract verifies user permissions and generates access transactions for users who meet the access policies. Moreover, the cloud server receives user identity keys and matches the user attribute set with the ciphertext attribute set. Furthermore, blockchain networks replace traditional IoT centralized servers for identity authentication, authorization, key management, and attribute updates, reducing information leakage risk.
Finally, we demonstrate that the DUPH-BAAC scheme resists chosen-plaintext attacks under selectively chosen access structures, achieving IND-sAS-CPA security.}, } @article {pmid40193364, year = {2025}, author = {Pan, X and Wang, Z and Feng, G and Wang, S and Samiappan, S}, title = {Automated mapping of land cover in Google Earth Engine platform using multispectral Sentinel-2 and MODIS image products.}, journal = {PloS one}, volume = {20}, number = {4}, pages = {e0312585}, pmid = {40193364}, issn = {1932-6203}, mesh = {Support Vector Machine ; *Satellite Imagery/methods ; Forests ; *Environmental Monitoring/methods ; *Image Processing, Computer-Assisted/methods ; }, abstract = {Land cover mapping often utilizes supervised classification, which can have issues with insufficient sample size and sample confusion; this study assessed the accuracy of a fast and reliable method for automatic labeling and collection of training samples. Based on self-programming in the Google Earth Engine (GEE) cloud-based platform, a large and reliable training dataset of multispectral Sentinel-2 imagery was extracted automatically across the study area from the existing MODIS land cover product. To enhance confidence in high-quality training class labels, homogeneous 20 m Sentinel-2 pixels within each 500 m MODIS pixel were selected, and a minority of heterogeneous 20 m pixels were removed based on calculations of spectral centroid and Euclidean distance. Further, quality control and a spatial filter were applied to all land cover classes to generate a reliable and representative training dataset that was subsequently applied to train the Classification and Regression Tree (CART), Random Forest (RF), and Support Vector Machine (SVM) classifiers. The results show that the main land cover types in the study area as distinguished by the three different classifiers were Evergreen Broadleaf Forests, Mixed Forests, Woody Savannas, and Croplands. In the training and validation samples, the numbers of correctly classified pixels under the CART, which is not computationally intensive, were higher than those for the RF and SVM classifiers. Moreover, the user's and producer's accuracies, overall accuracy and kappa coefficient of the CART classifier were the best, indicating the CART classifier was more suitable for this automatic workflow for land cover mapping. The proposed method can automatically generate a large number of reliable and accurate training samples in a timely manner, which is promising for future land cover mapping in a large-scale region.}, } @article {pmid40188709, year = {2025}, author = {Pantic, IV and Mugosa, S}, title = {Artificial intelligence strategies based on random forests for detection of AI-generated content in public health.}, journal = {Public health}, volume = {242}, number = {}, pages = {382-387}, doi = {10.1016/j.puhe.2025.03.029}, pmid = {40188709}, issn = {1476-5616}, mesh = {Humans ; *Artificial Intelligence ; *Public Health ; *Machine Learning ; *Supervised Machine Learning ; Random Forest ; }, abstract = {OBJECTIVES: To train and test a Random Forest machine learning model with the ability to distinguish AI-generated from human-generated textual content in the domain of public health and public health policy.

STUDY DESIGN: Supervised machine learning study.

METHODS: A dataset comprising 1000 human-generated and 1000 AI-generated paragraphs was created. Textual features were extracted using TF-IDF vectorization which calculates term frequency (TF) and Inverse document frequency (IDF), and combines the two measures to produce a score for individual terms. The Random Forest model was trained and tested using the Scikit-Learn library and Jupyter Notebook service in the Google Colab cloud-based environment, with Google CPU hardware acceleration.
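A minimal sketch of this kind of pipeline (TF-IDF features feeding a Random Forest in scikit-learn); the toy corpus, labels, and train/test split are illustrative stand-ins for the paper's 2000 paragraphs:

from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score

texts = ["human-written public health paragraph"] * 50 + \
        ["ai-generated public health paragraph"] * 50
labels = [0] * 50 + [1] * 50  # 0 = human, 1 = AI-generated

X = TfidfVectorizer().fit_transform(texts)  # TF x IDF score per term
X_tr, X_te, y_tr, y_te = train_test_split(X, labels, test_size=0.2, random_state=0)

clf = RandomForestClassifier(n_estimators=100, random_state=0).fit(X_tr, y_tr)
print(accuracy_score(y_te, clf.predict(X_te)))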

RESULTS: The model achieved a classification accuracy of 81.8% and an area under the ROC curve of 0.9. For human-generated content, precision, recall, and F1-score were 0.85, 0.78, and 0.81, respectively. For AI-generated content, these metrics were 0.79, 0.86, and 0.82. The MCC value of 0.64 indicated moderate to strong predictive power. The model demonstrated robust sensitivity (recall for AI-generated class) of 0.86 and specificity (recall for human-generated class) of 0.78.

CONCLUSIONS: The model exhibited acceptable performance, as measured by classification accuracy, area under the receiver operating characteristic curve, and other metrics. This approach can be further improved by incorporating additional supervised machine learning techniques and serves as a foundation for the future development of a sophisticated and innovative AI system. Such a system could play a crucial role in combating misinformation and enhancing public trust across various government platforms, media outlets, and social networks.}, } @article {pmid40182548, year = {2025}, author = {Jin, X and Deng, A and Fan, Y and Ma, K and Zhao, Y and Wang, Y and Zheng, K and Zhou, X and Lu, G}, title = {Diversity, functionality, and stability: shaping ecosystem multifunctionality in the successional sequences of alpine meadows and alpine steppes on the Qinghai-Tibet Plateau.}, journal = {Frontiers in plant science}, volume = {16}, number = {}, pages = {1436439}, pmid = {40182548}, issn = {1664-462X}, abstract = {Recent investigations on the Tibetan Plateau have harnessed advancements in digital ground vegetation surveys, high temporal resolution remote sensing data, and sophisticated cloud computing technologies to delineate successional dynamics between alpine meadows and alpine steppes. However, these efforts have not thoroughly explored how different successional stages affect key ecological parameters, such as species and functional diversity, stability, and ecosystem multifunctionality, which are fundamental to ecosystem resilience and adaptability. Given this gap, we systematically investigate variations in vegetation diversity, functional diversity, and the often-overlooked dimension of community stability across the successional gradient from alpine meadows to alpine steppes. We further identify the primary environmental drivers of these changes and evaluate their collective impact on ecosystem multifunctionality. Our analysis reveals that, as vegetation communities progress from alpine meadows toward alpine steppes, multi-year average precipitation and temperature decline significantly, accompanied by reductions in soil nutrients. These environmental shifts led to decreased species diversity, driven by lower precipitation and reduced soil nitrate-nitrogen levels, as well as community differentiation influenced by declining soil pH and precipitation. Consequently, as species loss and community differentiation intensified, these changes diminished functional diversity and eroded community resilience and resistance, ultimately reducing grassland ecosystem multifunctionality. Using linear mixed-effects model and structural equation modeling, we found that functional diversity is the foremost determinant of ecosystem multifunctionality, followed by species diversity. Surprisingly, community stability also significantly influences ecosystem multifunctionality-a factor rarely highlighted in previous studies. 
These findings deepen our understanding of the interplay among diversity, functionality, stability, and ecosystem multifunctionality, and support the development of an integrated feedback model linking environmental drivers with ecological attributes in alpine grassland ecosystems.}, } @article {pmid40175456, year = {2025}, author = {Zonghui, W and Veniaminovna, KO and Vladimirovna, VO and Ivan, K and Isleem, HF}, title = {Sustainability in construction economics as a barrier to cloud computing adoption in small-scale Building projects.}, journal = {Scientific reports}, volume = {15}, number = {1}, pages = {11329}, pmid = {40175456}, issn = {2045-2322}, abstract = {The application of intelligent technology to enhance decision-making, optimize processes, and boost project economics and sustainability has the potential to significantly revolutionize the construction industry. However, there are several barriers to its use in small-scale construction projects in China. This study aims to identify these challenges and provide solutions. Using a mixed-methods approach that incorporates quantitative analysis, structural equation modeling, and a comprehensive literature review, the study highlights key problems. These include specialized challenges, difficulty with data integration, financial and cultural constraints, privacy and ethical issues, limited data accessibility, and problems with scalability and connectivity. The findings demonstrate how important it is to remove these barriers to fully utilize intelligent computing in the construction sector. Recommendations and practical strategies are provided to help industry participants overcome these challenges. Although the study's geographical emphasis and cross-sectional approach are limitations, they also offer opportunities for further investigation. This study contributes significantly to the growing body of knowledge on intelligent computing in small-scale construction projects and offers practical guidance on how businesses might leverage its transformative potential.}, } @article {pmid40175409, year = {2025}, author = {Fabrizi, A and Fiener, P and Jagdhuber, T and Van Oost, K and Wilken, F}, title = {Plasticulture detection at the country scale by combining multispectral and SAR satellite data.}, journal = {Scientific reports}, volume = {15}, number = {1}, pages = {11339}, pmid = {40175409}, issn = {2045-2322}, abstract = {The use of plastic films has been growing in agriculture, benefiting consumers and producers. However, concerns have been raised about the environmental impact of plastic film use, with mulching films posing a greater threat than greenhouse films. This calls for large-scale monitoring of different plastic film uses. We used cloud computing, freely available optical and radar satellite images, and machine learning to map plastic-mulched farmland (PMF) and plastic cover above vegetation (PCV) (e.g., greenhouse, tunnel) across Germany. The algorithm detected 103 × 10³ ha of PMF and 37 × 10³ ha of PCV in 2020, while a combination of agricultural statistics and surveys estimated a smaller plasticulture cover of around 100 × 10³ ha in 2019. Based on ground observations, the overall accuracy of the classification is 85.3%. Optical and radar features had similar importance scores, and a distinct backscatter of PCV was related to metal frames underneath the plastic films. Overall, the algorithm achieved strong results in distinguishing PCV from PMF.
This study maps different plastic film uses at a country scale for the first time and sheds light on the high potential of freely available satellite data for continental monitoring.}, } @article {pmid40175046, year = {2025}, author = {Suveena, S and Rekha, AA and Rani, JR and V Oommen, O and Ramakrishnan, R}, title = {The translational impact of bioinformatics on traditional wet lab techniques.}, journal = {Advances in pharmacology (San Diego, Calif.)}, volume = {103}, number = {}, pages = {287-311}, doi = {10.1016/bs.apha.2025.01.012}, pmid = {40175046}, issn = {1557-8925}, mesh = {*Computational Biology/methods ; Humans ; Animals ; *Translational Research, Biomedical/methods ; }, abstract = {Bioinformatics has taken a pivotal place in the life sciences field. Not only does it improve, but it also fine-tunes and complements wet lab experiments. It has been a driving force in the biological sciences, converting them into hypothesis- and data-driven fields. This study highlights the translational impact of bioinformatics on experimental biology and discusses its evolution and the advantages it has brought to advancing biological research. Computational analyses make labor-intensive wet lab work cost-effective by reducing the use of expensive reagents. Genome/proteome-wide studies have become feasible due to the efficiency and speed of bioinformatics tools, at a pace wet lab experiments can hardly match. Computational methods provide the scalability essential for manipulating large and complex data of biological origin. AI-integrated bioinformatics studies can unveil important biological patterns that traditional approaches may otherwise overlook. Bioinformatics contributes to hypothesis formation and experiment design, which is pivotal for modern-day multi-omics and systems biology studies. Integrating bioinformatics into experimental procedures increases reproducibility and helps reduce human errors. Although today's AI-integrated bioinformatics predictions have significantly improved in accuracy over the years, wet lab validation is still unavoidable for confirming these predictions. Challenges persist in multi-omics data integration and analysis, AI model interpretability, and multiscale modeling. Addressing these shortcomings through the latest developments is essential for advancing our knowledge of disease mechanisms, therapeutic strategies, and precision medicine.}, } @article {pmid40175036, year = {2025}, author = {Das, IJ and Bhatta, K and Sarangi, I and Samal, HB}, title = {Innovative computational approaches in drug discovery and design.}, journal = {Advances in pharmacology (San Diego, Calif.)}, volume = {103}, number = {}, pages = {1-22}, doi = {10.1016/bs.apha.2025.01.006}, pmid = {40175036}, issn = {1557-8925}, mesh = {*Drug Discovery/methods ; *Drug Design ; Humans ; Animals ; Machine Learning ; *Computational Biology/methods ; }, abstract = {In the current scenario of pandemics, drug discovery and design have undergone a significant transformation due to the integration of advanced computational methodologies. These methodologies utilize sophisticated algorithms, machine learning, artificial intelligence, and high-performance computing to expedite the drug development process, enhance accuracy, and reduce costs. Machine learning and AI have revolutionized predictive modeling, virtual screening, and de novo drug design, allowing for the identification and optimization of novel compounds with desirable properties.
Molecular dynamics simulations provide detailed insight into protein-ligand interactions and conformational changes, facilitating an understanding of drug efficacy at the atomic level. Quantum mechanics/molecular mechanics methods offer precise predictions of binding energies and reaction mechanisms, while structure-based drug design employs docking studies and fragment-based design to improve drug-receptor binding affinities. Network pharmacology and systems biology approaches analyze polypharmacology and biological networks to identify novel drug targets and understand complex interactions. Cheminformatics explores vast chemical spaces and employs data mining to find patterns in large datasets. Computational toxicology predicts adverse effects early in development, reducing reliance on animal testing. Bioinformatics integrates genomic, proteomic, and metabolomic data to discover biomarkers and understand genetic variations affecting drug response. Lastly, cloud computing and big data technologies facilitate high-throughput screening and comprehensive data analysis. Collectively, these computational innovations are driving a paradigm shift in drug discovery and design, making it more efficient, accurate, and cost-effective.}, } @article {pmid40169696, year = {2025}, author = {Erukala, SB and Tokmakov, D and Perumalla, A and Kaluri, R and Bekyarova-Tokmakova, A and Mileva, N and Lubomirov, S}, title = {A secure end-to-end communication framework for cooperative IoT networks using hybrid blockchain system.}, journal = {Scientific reports}, volume = {15}, number = {1}, pages = {11077}, pmid = {40169696}, issn = {2045-2322}, support = {project No.BG-RRP-2.004-0001-C01//The European Union-NextGeneration EU , Republic of Bulgaria,/ ; }, abstract = {The Internet of Things (IoT) is a disruptive technology that underpins Industry 5.0 by integrating various service technologies to enable intelligent connectivity among smart objects. These technologies enhance the convergence of Information Technology (IT), Operational Technology (OT), Core Technology (CT), and Data Technology (DT) networks, improving automation and decision-making capabilities. While cloud computing has become a mainstream technology across multiple domains, it struggles to efficiently manage the massive volume of OT data generated by IoT devices due to high latency, data transfer costs, limited resilience, and insufficient context awareness. Fog computing has emerged as a viable solution, extending cloud capabilities to the edge through a distributed peer-to-peer (P2P) network, enabling decentralized data processing and management. However, IoT networks still face critical challenges, including connectivity, heterogeneity, scalability, interoperability, security, and real-time decision-making constraints. Security is a key challenge in IoT implementations, including secure data communication, IoT edge and fog device identity, end-to-end authentication, and secure storage. This paper presents an efficient blockchain-based framework that creates a cooperative IoT network with a secure end-to-end communication flow. The framework utilizes a hybrid blockchain network whose chains cooperate to provide end-to-end secure communication from end devices to cloud storage. The fog servers maintain a private blockchain as a next-generation public key infrastructure to identify and authenticate the IoT edge devices.
The consortium blockchain will be maintained in the cloud and integrated with the permissioned blockchain system. This system ensures secure cloud storage, authorization, efficient key exchange, and remote protection (encryption) of all sensitive information. To improve synchronization and block generation, reduce overhead, and ensure scalable IoT network operation, we proposed the threshold signature-based Proof of Stake and Validation (PoSV) consensus. Additionally, lightweight authentication protects resource-constrained IoT nodes using an aggregate signature, ensuring security and performance in real-time scenarios. The proposed system is implemented, and its performance is evaluated using key metrics such as cryptographic processing overhead, consensus efficiency, block acceptance time, and transaction delay. The findings show that the threshold signature-based Proof of Stake and Validation (PoSV) consensus reduces the computational burden of individual signature verification, which results in an optimized transaction latency of 80-150 ms, compared to 100-200 ms without PoSV. Additionally, aggregating multiple signatures from different authentication events reduces signing time to 1.98 ms, compared with 2.72 ms for individual signatures, and the overhead of verifying multiple individual transactions is reduced from 2.87 ms to 1.46 ms, with authentication delays ranging between 95 and 180 ms. Hence, the proposed framework improves over existing approaches in terms of linear computational complexity, stronger cryptographic methods, and a more efficient consensus process.}, } @article {pmid40169667, year = {2025}, author = {Li, X and Shen, T and Garcia, CL and Teich, I and Chen, Y and Chen, J and Kabo-Bah, AT and Yang, Z and Jia, X and Lu, Q and Nyamtseren, M}, title = {A 30-meter resolution global land productivity dynamics dataset from 2013 to 2022.}, journal = {Scientific data}, volume = {12}, number = {1}, pages = {555}, pmid = {40169667}, issn = {2052-4463}, abstract = {Land degradation is one of the most severe environmental challenges globally. To address its adverse impacts, the United Nations endorsed Land Degradation Neutrality (SDG 15.3) within the Sustainable Development Goals in 2015. Trends in land productivity are a key sub-indicator for reporting progress toward SDG 15.3. Currently, the highest spatial resolution of global land productivity dynamics (LPD) products is 250 meters, which seriously hampers SDG 15.3 reporting and intervention at the fine scale. Generating a higher spatial resolution product faces significant challenges, including massive data processing, image cloud pollution, and incompatible spatiotemporal resolutions. This study, leveraging the Google Earth Engine platform and utilizing Landsat-8 and MODIS imagery, employed a gap-filling and Savitzky-Golay filtering algorithm and an advanced spatiotemporal filtering method to obtain a high-quality 30-meter NDVI dataset; the global 30-meter LPD product from 2013 to 2022 was then generated using the FAO-WOCAT methodology and compared against multiple datasets.
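A sketch of the gap-filling and Savitzky-Golay step of this kind of workflow, applied to a single pixel's NDVI time series with SciPy; the values and window settings are illustrative, not the study's parameters:

import numpy as np
from scipy.signal import savgol_filter

ndvi = np.array([0.31, np.nan, 0.42, 0.55, np.nan, 0.71, 0.68, 0.52, 0.40, 0.33])

# Fill cloud-contaminated (NaN) observations by linear interpolation in time.
t = np.arange(ndvi.size)
gaps = np.isnan(ndvi)
ndvi[gaps] = np.interp(t[gaps], t[~gaps], ndvi[~gaps])

# Savitzky-Golay smoothing: window of 5 samples, cubic polynomial fit.
smooth = savgol_filter(ndvi, window_length=5, polyorder=3)
print(smooth.round(2))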
This is the first global scale 30-meter LPD dataset, which provides essential data support for SDG 15.3 monitoring and reporting globally.}, } @article {pmid40168238, year = {2025}, author = {Hao, R and Zhao, Y and Zhang, S and Deng, X}, title = {Deep Learning for Ocean Forecasting: A Comprehensive Review of Methods, Applications, and Datasets.}, journal = {IEEE transactions on cybernetics}, volume = {PP}, number = {}, pages = {}, doi = {10.1109/TCYB.2025.3539990}, pmid = {40168238}, issn = {2168-2275}, abstract = {As a longstanding scientific challenge, accurate and timely ocean forecasting has always been a sought-after goal for ocean scientists. However, traditional theory-driven numerical ocean prediction (NOP) suffers from various challenges, such as the indistinct representation of physical processes, inadequate application of observation assimilation, and inaccurate parameterization of models, which lead to difficulties in obtaining effective knowledge from massive observations, and enormous computational challenges. With the successful evolution of data-driven deep learning in various domains, it has been demonstrated to mine patterns and deep insights from the ever-increasing stream of oceanographic spatiotemporal data, which provides novel possibilities for revolution in ocean forecasting. Deep-learning-based ocean forecasting (DLOF) is anticipated to be a powerful complement to NOP. Nowadays, researchers attempt to introduce deep learning into ocean forecasting and have achieved significant progress that provides novel motivations for ocean science. This article provides a comprehensive review of the state-of-the-art DLOF research regarding model architectures, spatiotemporal multiscales, and interpretability while specifically demonstrating the feasibility of developing hybrid architectures that incorporate theory-driven and data-driven models. Moreover, we comprehensively evaluate DLOF from datasets, benchmarks, and cloud computing. Finally, the limitations of current research and future trends of DLOF are also discussed and prospected.}, } @article {pmid40167712, year = {2025}, author = {Zhu, X and Lu, Y and Chen, Y and Wang, F and Dou, C and Ju, W}, title = {Optical identification of marine floating debris from Sentinel-2 MSI imagery using radiation signal difference.}, journal = {Optics letters}, volume = {50}, number = {7}, pages = {2330-2333}, doi = {10.1364/OL.554994}, pmid = {40167712}, issn = {1539-4794}, abstract = {A spaceborne optical technique for marine floating debris is developed to detect, discriminate, and quantify such debris, especially that with weak optical signals. The technique uses only the top-of-atmosphere (TOA) signal based on the difference radiative transfer (DRT). DRT unveils diverse optical signals by referencing those within the neighborhood. Using DRT of either simulated signals or Sentinel-2 Multispectral Instrument (MSI) data, target types can be confirmed between the two and pinpointed on a normalized type line. The line, mostly, indicates normalized values of <0.2 for waters, 0.2-0.6 for debris, and >0.8 for algae. The classification limit for MSI is a sub-pixel fraction of 3%; above which, the boundary between debris and algae is distinct, being separated by >three standard deviations. 
This automated methodology unleashes TOA imagery on cloud data platforms such as Google Earth Engine (GEE) and promotes monitoring after coastal disasters, such as debris dumping and algal blooms.}, } @article {pmid40164608, year = {2025}, author = {Jia, Z and Fan, S and Wang, Z and Shao, S and He, D}, title = {Partial discharge defect recognition method of switchgear based on cloud-edge collaborative deep learning.}, journal = {Scientific reports}, volume = {15}, number = {1}, pages = {10956}, pmid = {40164608}, issn = {2045-2322}, support = {52199719000X//the Research Project of State Grid Sichuan Electric Power Company/ ; }, abstract = {To address the limitations of traditional partial discharge (PD) detection methods for switchgear, which fail to meet the requirements for real-time monitoring, rapid assessment, sample fusion, and joint analysis in practical applications, a joint PD recognition method for switchgear based on edge computing and deep learning is proposed. An edge collaborative defect identification architecture for switchgear is constructed, which includes the terminal device side, terminal collection side, edge-computing side, and cloud-computing side. The PD signal of the switchgear is extracted using a UHF sensor and a broadband pulse current sensor on the terminal collection side. Multidimensional features are obtained from these signals, and a high-dimensional feature space is constructed based on feature extraction and dimensionality reduction on the edge-computing side. On the cloud side, a deep belief network (DBN)-based switchgear PD defect identification method is proposed, and the PD samples acquired on the edge side are transmitted in real time to the cloud for training. Upon completion of the training, the resulting model is transmitted back to the edge side for inference, thereby facilitating real-time joint analysis of PD defects across multiple switchgear units. Verification of the proposed method is conducted using PD samples simulated in the laboratory. The results indicate that the DBN proposed in this paper can recognize PDs in switchgear with an accuracy of 88.03%, and under the edge computing architecture, the training time of the switchgear PD defect type classifier can be reduced by 44.28%, overcoming the challenges associated with traditional diagnostic models, which are characterized by long training durations, low identification efficiency, and weak collaborative analysis capabilities.}, } @article {pmid40162168, year = {2025}, author = {Yang, H and Jiang, L}, title = {Regulating neural data processing in the age of BCIs: Ethical concerns and legal approaches.}, journal = {Digital health}, volume = {11}, number = {}, pages = {20552076251326123}, pmid = {40162168}, issn = {2055-2076}, abstract = {Brain-computer interfaces (BCIs) have seen increasingly fast growth with the help of AI, algorithms, and cloud computing. While providing great benefits for both medical and educational purposes, BCIs involve the processing of neural data, which are uniquely sensitive due to their most intimate nature, posing unique risks and ethical concerns, especially related to privacy and safe control of our neural data. In furtherance of human rights protections such as mental privacy, data laws provide more detailed and enforceable rules for processing neural data, which may balance the tension between privacy protection and the public's need for wellness promotion and scientific progress through data sharing.
This article notes that most current data laws, such as the GDPR, do not clearly cover neural data and are thus incapable of providing full protection suited to its special nature. The new legislative reforms in the U.S. states of Colorado and California made pioneering advances to incorporate neural data into data privacy laws. Yet regulatory gaps remain, as such reforms have not provided special additional rules for neural data processing. Potential problems such as static consent, vague research exceptions, and loopholes in regulating non-personal neural data need to be further addressed. We recommend improved measures taken through amending data laws or enacting dedicated data acts.}, } @article {pmid40155754, year = {2025}, author = {Bai, CM and Shu, YX and Zhang, S}, title = {Authenticable quantum secret sharing based on special entangled state.}, journal = {Scientific reports}, volume = {15}, number = {1}, pages = {10819}, pmid = {40155754}, issn = {2045-2322}, support = {12301590//National Natural Science Foundation of China/ ; BJ2025061//Science Research Project of Hebei Education Department/ ; }, abstract = {In this paper, a pair of quantum states are constructed based on an orthogonal array and further generalized to multi-body quantum systems. Subsequently, a novel physical process is designed, which is aimed at effectively masking quantum states within multipartite quantum systems. Based on this masker, a new authenticable quantum secret sharing scheme is proposed, which can realize a class of special access structures. In the distribution phase, an unknown quantum state is shared safely among multiple participants, and this secret quantum state is embedded into a multi-particle entangled state using the masking approach. In the reconstruction phase, a series of precisely designed measurements and corresponding unitary operations are performed by the participants in the authorized set to restore the original information quantum state. To ensure the security of the scheme, a security analysis against five major types of quantum attacks is conducted. Finally, when compared with other quantum secret sharing schemes based on entangled states, the proposed scheme is found to be not only more flexible but also easier to implement based on existing quantum computing cloud platforms.}, } @article {pmid40151796, year = {2023}, author = {Davey, BC and Billingham, W and Davis, JA and Gibson, L and D'Vaz, N and Prescott, SL and Silva, DT and Whalan, S}, title = {Data resource profile: the ORIGINS project databank: a collaborative data resource for investigating the developmental origins of health and disease.}, journal = {International journal of population data science}, volume = {8}, number = {1}, pages = {2388}, pmid = {40151796}, issn = {2399-4908}, mesh = {Humans ; Female ; Pregnancy ; Infant ; Child, Preschool ; Longitudinal Studies ; Infant, Newborn ; Western Australia ; *Databases, Factual ; Birth Cohort ; Male ; Noncommunicable Diseases/epidemiology ; }, abstract = {INTRODUCTION: The ORIGINS Project ("ORIGINS") is a longitudinal, population-level birth cohort with data and biosample collections that aim to facilitate research to reduce non-communicable diseases (NCDs) and encourage 'a healthy start to life'.
ORIGINS has gathered millions of datapoints and over 400,000 biosamples over 15 timepoints, antenatally through to five years of age, from mothers, non-birthing partners and the child, across four health and wellness domains: 'Growth and development', 'Medical, biological and genetic', 'Biopsychosocial and cognitive', 'Lifestyle, environment and nutrition'.

METHODS: Mothers, non-birthing partners and their offspring were recruited antenatally (between 18 and 38 weeks' gestation) from the Joondalup and Wanneroo communities of Perth, Western Australia from 2017 to 2024. Data come from several sources, including routine hospital antenatal and birthing data, ORIGINS clinical appointments, and online self-completed surveys comprising several standardised measures. Data are merged using the Medical Record Number (MRN), the ORIGINS Unique Identifier and the ORIGINS Pregnancy Number, as well as additional demographic data (e.g. name and date of birth) when necessary.
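A minimal sketch of this kind of deterministic record linkage in pandas; the column names and values are hypothetical, not the actual ORIGINS schema:

import pandas as pd

hospital = pd.DataFrame({"MRN": [101, 102], "birth_weight_g": [3400, 3150]})
surveys = pd.DataFrame({"MRN": [101, 102], "origins_uid": ["A1", "A2"],
                        "survey_score": [4, 9]})

# Merge hospital and survey records on the shared Medical Record Number.
linked = hospital.merge(surveys, on="MRN", how="inner", validate="one_to_one")
print(linked)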

RESULTS: The data are held on an integrated data platform that extracts, links, ingests, integrates and stores ORIGINS' data on an Amazon Web Services (AWS) cloud-based data warehouse. Data are linked, transformed for cleaning and coding, and catalogued, ready to provide to sub-projects (independent researchers that apply to use ORIGINS data) to prepare for their own analyses. ORIGINS maximises data quality by checking and replacing missing and erroneous data across the various data sources.
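A sketch of one ingestion step of the kind implied above, staging a cleaned extract in AWS S3 with boto3; the bucket, key, and file names are hypothetical, and credentials are assumed to come from the environment:

import boto3

s3 = boto3.client("s3")
# Upload a cleaned extract to a (hypothetical) staging area of the warehouse.
s3.upload_file(Filename="clinic_visits_clean.parquet",
               Bucket="example-origins-staging",
               Key="landing/clinic_visits/2024.parquet")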

CONCLUSION: As a wide array of data across several different domains and timepoints has been collected, the options for future research and utilisation of the data and biosamples are broad. As ORIGINS aims to extend into middle childhood, researchers can examine which antenatal and early childhood factors predict middle childhood outcomes. ORIGINS also aims to link to State and Commonwealth data sets (e.g. Medicare, the National Assessment Program - Literacy and Numeracy, the Pharmaceutical Benefits Scheme), which will cater to a wide array of research questions.}, } @article {pmid40151450, year = {2025}, author = {Steiner, M and Huettmann, F}, title = {Moving beyond the physical impervious surface impact and urban habitat fragmentation of Alaska: quantitative human footprint inference from the first large scale 30 m high-resolution Landscape metrics big data quantification in R and the cloud.}, journal = {PeerJ}, volume = {13}, number = {}, pages = {e18894}, pmid = {40151450}, issn = {2167-8359}, mesh = {Alaska ; Humans ; *Ecosystem ; *Urbanization ; Big Data ; Conservation of Natural Resources/methods ; Climate Change ; Remote Sensing Technology ; }, abstract = {With increased globalization, man-made climate change, and urbanization, the landscape, embedded within the Anthropocene, becomes increasingly fragmented. With wilderness habitats transitioning and getting lost, globally relevant regions considered 'pristine', such as Alaska, are no exception. Alaska holds 60% of the U.S. National Park system's area and is of national and international importance, considering the U.S. is one of the wealthiest nations on earth. These characteristics tie into densities and quantities of human features, e.g., roads, houses, mines, wind parks, agriculture, trails, etc., that can be summarized as 'impervious surfaces.' These are physical impacts that actively drive urban landscape fragmentation. Using the remote sensing data of the National Land Cover Database (NLCD), here we attempt to create the first quantification of this physical human impact on the Alaskan landscape and its fragmentation. We quantified these impacts using the well-established landscape metrics tool 'Fragstats', implemented as the R package "landscapemetrics" in the desktop software and through the interface of a Linux cloud-computing environment. This workflow allows, for the first time, the computational limitations of the conventional Fragstats software to be overcome within a reasonably quick timeframe. Thereby, we are able to analyze a land area as large as approx. 1,517,733 km² (the state of Alaska) while maintaining a high assessment resolution of 30 m. Based on this traditional methodology, we found that Alaska has a reported physical human impact of c. 0.067%. We additionally overlaid other features that were not included in the input data to highlight the overall true human impact (e.g., roads, trails, airports, governance boundaries in game management and park units, mines, etc.). We found that, using remote sensing (human impact layers), Alaska's human impact is considerably underestimated, to the point of being meaningless. The state is more seriously fragmented and affected by humans than commonly assumed. Very few areas are truly untouched, and the landscape displays a high patch density with correspondingly low mean patch sizes throughout the study area. Instead, the true human impact is likely close to 100% throughout Alaska for several metrics.
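A sketch of one landscape metric of the kind involved here (patch density), computed on a toy binary impervious-surface raster with SciPy rather than Fragstats/landscapemetrics; the grid, 30 m cell size, and connectivity rule are illustrative:

import numpy as np
from scipy import ndimage

raster = np.array([[1, 1, 0, 0],
                   [0, 0, 0, 1],
                   [0, 1, 1, 1],
                   [0, 0, 0, 0]])  # 1 = impervious, 30 m cells

labeled, n_patches = ndimage.label(raster)  # 4-connected patches
area_ha = raster.size * 30 * 30 / 10_000    # landscape area in hectares
print(n_patches / area_ha)                  # patch density: patches per hectare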
With these newly created insights, we provide the first state-wide landscape data and inference that are likely of considerable importance for land management entities in the state of Alaska, and for the U.S. National Park systems overall, especially in the changing climate. Likewise, the methodological framework presented here shows an Open Access workflow and can be used as a reference to be reproduced virtually anywhere else on the planet to assess more realistic large-scale landscape metrics. It can also be used to assess human impacts on the landscape for more sustainable landscape stewardship and mitigation in policy.}, } @article {pmid40144736, year = {2025}, author = {Chaikovsky, I and Dziuba, D and Kryvova, O and Marushko, K and Vakulenko, J and Malakhov, K and Loskutov, О}, title = {Subtle changes on electrocardiogram in severe patients with COVID-19 may be predictors of treatment outcome.}, journal = {Frontiers in artificial intelligence}, volume = {8}, number = {}, pages = {1561079}, pmid = {40144736}, issn = {2624-8212}, abstract = {BACKGROUND: Two years after the COVID-19 pandemic, it became known that one of the complications of this disease is myocardial injury. Electrocardiography (ECG) and cardiac biomarkers play a vital role in the early detection of cardiovascular complications and risk stratification. The study aimed to investigate the value of a new electrocardiographic metric for detecting minor myocardial injury in patients during COVID-19 treatment.

METHODS: The study was conducted in 2021. A group of 26 patients with a verified COVID-19 diagnosis admitted to the intensive care unit for infectious diseases was examined. The severity of a patient's condition was calculated using the NEWS score. The digital ECGs were recorded repeatedly (at the beginning and 2-4 times during the treatment). A total of 240 primary and composite ECG parameters were analyzed for each electrocardiogram. Among these patients, six died during treatment. Cluster analysis was used to identify subgroups of patients that differed significantly in terms of disease severity (NEWS), SpO2, and the integral ECG index (an indicator of the state of the cardiovascular system).
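A minimal sketch of this kind of subgrouping step, clustering patients on NEWS, SpO2 and an integral ECG index with scikit-learn; the feature values are invented:

import numpy as np
from sklearn.preprocessing import StandardScaler
from sklearn.cluster import KMeans

# Columns: NEWS score, SpO2 (%), integral ECG index (toy numbers).
X = np.array([[4, 96, 0.62], [9, 88, 0.31], [3, 97, 0.70],
              [10, 85, 0.28], [5, 94, 0.55], [8, 89, 0.35]])

Xz = StandardScaler().fit_transform(X)  # put features on comparable scales
subgroup = KMeans(n_clusters=2, n_init=10, random_state=0).fit_predict(Xz)
print(subgroup)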

RESULTS: Using repeated-measures analysis of variance (ANOVA), changes in the indicators within subgroups at the end of treatment were assessed statistically. These subgroup differences persisted at the end of treatment. To identify potential predictors of mortality, critical clinical and ECG parameters of surviving (S) and non-surviving (D) patients were compared using parametric and non-parametric statistical tests. A decision tree model to classify survival in patients with COVID-19 was constructed based on a subset of ECG parameters and the NEWS score.
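A sketch of a survival classifier of this kind, built from the NEWS score plus two ECG amplitudes with scikit-learn; the features, values, and tree depth are illustrative assumptions, not the study's fitted model:

from sklearn.tree import DecisionTreeClassifier, export_text

# Columns: NEWS score, Q-wave amplitude, R-wave amplitude (invented values).
X = [[4, 0.12, 1.05], [9, 0.28, 0.71], [3, 0.10, 1.10],
     [10, 0.31, 0.64], [5, 0.15, 0.98], [8, 0.27, 0.70]]
y = [1, 0, 1, 0, 1, 0]  # 1 = survived, 0 = died

tree = DecisionTreeClassifier(max_depth=2, random_state=0).fit(X, y)
print(export_text(tree, feature_names=["NEWS", "Q_amp", "R_amp"]))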

CONCLUSION: A comparison of potential mortality predictors showed no significant differences in vital signs between survivors and non-survivors at the beginning of treatment. A set of ECG parameters significantly associated with treatment outcomes was identified; these may be predictors of COVID-19 mortality: T-wave morphology (SVD), Q-wave amplitude, and R-wave amplitude (lead I).}, } @article {pmid40142954, year = {2025}, author = {Kodumuru, R and Sarkar, S and Parepally, V and Chandarana, J}, title = {Artificial Intelligence and Internet of Things Integration in Pharmaceutical Manufacturing: A Smart Synergy.}, journal = {Pharmaceutics}, volume = {17}, number = {3}, pages = {}, pmid = {40142954}, issn = {1999-4923}, abstract = {Background: The integration of artificial intelligence (AI) with the internet of things (IoTs) represents a significant advancement in pharmaceutical manufacturing and effectively bridges the gap between digital and physical worlds. With AI algorithms integrated into IoTs sensors, the production process and quality control improve for better overall efficiency. This integration enables machine learning and deep learning for real-time analysis, predictive maintenance, and automation, continuously monitoring key manufacturing parameters. Objective: This paper reviews the current applications and potential impacts of integrating AI and the IoTs, in concert with key enabling technologies like cloud computing and data analytics, within the pharmaceutical sector. Results: Applications discussed herein focus on industrial predictive analytics and quality, underpinned by case studies showing improvements in product quality and reductions in downtime. Yet many challenges remain, including data integration, the ethical implications of AI-driven decisions, and, most of all, regulatory compliance. This review also discusses recent trends, such as AI in drug discovery and blockchain for data traceability, with the intent to outline the future of autonomous pharmaceutical manufacturing. Conclusions: In the end, this review points to basic frameworks and applications that illustrate ways to overcome existing barriers to production with increased efficiency, personalization, and sustainability.}, } @article {pmid40134869, year = {2025}, author = {Hussain, A and Aleem, M and Ur Rehman, A and Arshad, U}, title = {DE-RALBA: dynamic enhanced resource aware load balancing algorithm for cloud computing.}, journal = {PeerJ. Computer science}, volume = {11}, number = {}, pages = {e2739}, pmid = {40134869}, issn = {2376-5992}, abstract = {Cloud computing provides an opportunity to access large-scale and high-speed resources without establishing one's own computing infrastructure for executing high-performance computing (HPC) applications. The cloud offers computing resources (i.e., computation power, storage, operating systems, networks, and databases) as a public utility and provides services to end users on a pay-as-you-go model. For the past several years, the efficient utilization of resources on a compute cloud has been of prime interest to the scientific community. One of the key reasons behind inefficient resource utilization is the imbalanced distribution of workload while executing HPC applications in a heterogeneous computing environment.
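For intuition, a generic capacity-aware assignment sketch (not the published DE-RALBA algorithm): each job goes to the VM that would finish it earliest given that VM's compute capacity in MIPS:

jobs = [400, 900, 250, 700, 550]   # job sizes in millions of instructions
vm_mips = [100, 250, 500]          # heterogeneous VM capacities
finish = [0.0] * len(vm_mips)      # projected finish time per VM (seconds)

for size in sorted(jobs, reverse=True):  # place the largest jobs first
    vm = min(range(len(vm_mips)), key=lambda i: finish[i] + size / vm_mips[i])
    finish[vm] += size / vm_mips[vm]

print(f"makespan = {max(finish):.2f} s, per-VM finish times = {finish}")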
Static scheduling usually produces lower resource utilization and a higher makespan, while dynamic scheduling achieves better resource utilization and load balancing by incorporating a dynamic resource pool. Dynamic techniques, however, lead to increased overhead by requiring continuous system monitoring, job requirement assessments, and real-time allocation decisions. This additional load has the potential to impact the performance and responsiveness of the computing system. In this article, a dynamic enhanced resource-aware load balancing algorithm (DE-RALBA) is proposed to mitigate the load imbalance in job scheduling by considering the computing capabilities of all VMs in cloud computing. The empirical assessments are performed on the CloudSim simulator using instances of two scientific benchmark datasets (i.e., heterogeneous computing scheduling problems (HCSP) instances and the Google Cloud Jobs (GoCJ) dataset). The obtained results revealed that DE-RALBA mitigates the load imbalance and provides a significant improvement in makespan and resource utilization over existing algorithms, namely PSSLB, PSSELB, Dynamic MaxMin, and DRALBA. Using HCSP instances, the DE-RALBA algorithm achieves up to 52.35% improved resource utilization compared to existing techniques, while even better resource utilization is achieved using the GoCJ dataset.}, } @article {pmid40134463, year = {2025}, author = {Ramezani, R and Iranmanesh, S and Naeim, A and Benharash, P}, title = {Editorial: Bench to bedside: AI and remote patient monitoring.}, journal = {Frontiers in digital health}, volume = {7}, number = {}, pages = {1584443}, doi = {10.3389/fdgth.2025.1584443}, pmid = {40134463}, issn = {2673-253X}, } @article {pmid40133794, year = {2025}, author = {Evangelista, JE and Ali-Nasser, T and Malek, LE and Xie, Z and Marino, GB and Bester, AC and Ma'ayan, A}, title = {lncRNAlyzr: Enrichment Analysis for lncRNA Sets.}, journal = {Journal of molecular biology}, volume = {}, number = {}, pages = {168938}, doi = {10.1016/j.jmb.2025.168938}, pmid = {40133794}, issn = {1089-8638}, support = {OT2 OD036435/OD/NIH HHS/United States ; R01 DK131525/DK/NIDDK NIH HHS/United States ; U24 CA271114/CA/NCI NIH HHS/United States ; }, abstract = {lncRNAs make up a large portion of the human genome, affecting many biological processes in normal physiology and disease. However, human lncRNAs are understudied compared to protein-coding genes. While there are many tools for performing gene set enrichment analysis for coding genes, few tools exist for lncRNA enrichment analysis. lncRNAlyzr is a webserver application designed for lncRNA enrichment analysis. lncRNAlyzr has a database containing 33 lncRNA set libraries created by computing correlations between lncRNAs and annotated coding gene sets. After users submit a set of lncRNAs to lncRNAlyzr, the enrichment analysis results are visualized as ball-and-stick subnetworks where nodes are lncRNAs connected to enrichment terms from across selected lncRNA set libraries. To demonstrate lncRNAlyzr, it was used to analyze the effects of knocking down the lncRNA CYTOR in K562 cells. Overall, lncRNAlyzr is an enrichment analysis tool for lncRNAs aiming to further our understanding of lncRNA functional modules.
lncRNAlyzr is available from: https://lncrnalyzr.maayanlab.cloud.}, } @article {pmid40133599, year = {2025}, author = {Sng, LMF and Kaphle, A and O'Brien, MJ and Hosking, B and Reguant, R and Verjans, J and Jain, Y and Twine, NA and Bauer, DC}, title = {Optimizing UK biobank cloud-based research analysis platform to fine-map coronary artery disease loci in whole genome sequencing data.}, journal = {Scientific reports}, volume = {15}, number = {1}, pages = {10335}, pmid = {40133599}, issn = {2045-2322}, mesh = {Humans ; *Coronary Artery Disease/genetics ; *Whole Genome Sequencing/methods ; *Cloud Computing ; United Kingdom ; *Biological Specimen Banks ; Polymorphism, Single Nucleotide ; Genome-Wide Association Study/methods ; Machine Learning ; Genetic Predisposition to Disease ; Male ; Female ; UK Biobank ; }, abstract = {We conducted the first comprehensive association analysis of a coronary artery disease (CAD) cohort within the recently released UK Biobank (UKB) whole genome sequencing dataset. We employed the fine-mapping tool PolyFun and pinpointed rs10757274 as the most likely causal SNV within the 9p21.3 CAD risk locus. Notably, we show that machine-learning (ML) approaches, REGENIE and VariantSpark, exhibited greater sensitivity compared to traditional single-SNV logistic regression, uncovering rs28451064, a known risk locus in 21q22.11. Our findings underscore the utility of leveraging advanced computational techniques and cloud-based resources for mega-biobank analyses. Aligning with the paradigm shift of bringing compute to data, we demonstrate a 44% cost reduction and 94% speedup through compute architecture optimisation on UK Biobank's Research Analysis Platform using our RAPpoet approach. We discuss three considerations for researchers implementing novel workflows for datasets hosted on cloud platforms, to pave the way for harnessing mega-biobank-sized data through scalable, cost-effective cloud computing solutions.}, } @article {pmid40111379, year = {2025}, author = {Madan, B and Nair, S and Katariya, N and Mehta, A and Gogte, P}, title = {Smart waste management and air pollution forecasting: Harnessing Internet of things and fully Elman neural network.}, journal = {Waste management & research : the journal of the International Solid Wastes and Public Cleansing Association, ISWA}, volume = {}, number = {}, pages = {734242X241313286}, doi = {10.1177/0734242X241313286}, pmid = {40111379}, issn = {1096-3669}, abstract = {As the Internet of things (IoT) continues to transform modern technologies, innovative applications in waste management and air pollution monitoring are becoming critical for sustainable development. In this manuscript, a novel smart waste management (SWM) and air pollution forecasting (APF) system is proposed by leveraging IoT sensors and the fully Elman neural network (FENN) model, termed SWM-APF-IoT-FENN. The system integrates real-time data from waste and air quality sensors, including weight, trash level, odour and carbon monoxide (CO), collected from smart bins connected to a Google Cloud Server. Here, the MaxAbsScaler is employed for data normalization, ensuring consistent feature representation. Subsequently, the atmospheric contaminants surrounding the waste receptacles are observed using a FENN model. This model is utilized to predict the atmospheric concentration of CO and categorize the bin status as filled, half-filled and unfilled.
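A minimal numpy sketch of the Elman-style recurrence underlying such a model (context units feeding the hidden state back); the layer sizes and single-output head are illustrative assumptions, not the paper's FENN architecture:

import numpy as np

rng = np.random.default_rng(0)
n_in, n_hidden = 4, 8  # e.g. weight, trash level, odour, CO reading
W_in = rng.normal(0, 0.1, (n_hidden, n_in))
W_ctx = rng.normal(0, 0.1, (n_hidden, n_hidden))  # recurrent context weights
w_out = rng.normal(0, 0.1, n_hidden)

def forward(sequence):
    h = np.zeros(n_hidden)  # context units start at zero
    for x in sequence:      # one normalized sensor reading per time step
        h = np.tanh(W_in @ x + W_ctx @ h)  # Elman recurrence
    return w_out @ h        # e.g. predicted CO concentration

print(forward(rng.normal(size=(10, n_in))))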
Moreover, the weight parameter of the FENN model is tuned using the secretary bird optimization algorithm for better prediction results. The proposed methodology is implemented in Python, and the performance metrics are analysed. Experimental results demonstrate significant improvements in performance, achieving 15.65%, 18.45% and 21.09% higher accuracy, 18.14%, 20.14% and 24.01% higher F-Measure, 23.64%, 24.29% and 29.34% higher False Acceptance Rate (FAR), 25.00%, 27.09% and 31.74% higher precision, 20.64%, 22.45% and 28.64% higher sensitivity, 26.04%, 28.65% and 32.74% higher specificity, and 9.45%, 7.38% and 4.05% reduced computational time compared with conventional approaches such as the Elman neural network, the recurrent artificial neural network and long short-term memory with a gated recurrent unit, respectively. Thus, the proposed method offers a streamlined, efficient framework for real-time waste management and pollution forecasting, addressing critical environmental challenges.}, } @article {pmid40108296, year = {2025}, author = {Isaac, RA and Sundaravadivel, P and Marx, VSN and Priyanga, G}, title = {Enhanced novelty approaches for resource allocation model for multi-cloud environment in vehicular Ad-Hoc networks.}, journal = {Scientific reports}, volume = {15}, number = {1}, pages = {9472}, pmid = {40108296}, issn = {2045-2322}, abstract = {As the number of service requests for applications continues to increase due to various conditions, limited resources pose a barrier to providing applications with the appropriate Quality of Service (QoS) assurances. As a result, an efficient scheduling mechanism is required to determine the order of handling application requests, as well as the appropriate use of broadcast media and data transfer. In this paper, an innovative approach incorporating the Crossover and Mutation (CM)-centered Marine Predator Algorithm (MPA) is introduced for effective resource allocation. This strategic resource allocation optimally schedules resources within the Vehicular Edge Computing (VEC) network, ensuring the most efficient utilization. The proposed method begins with meticulous feature extraction from the vehicular network model, with attributes such as mobility patterns, transmission medium, bandwidth, storage capacity, and packet delivery ratio. For further analysis, the Elephant Herding Lion Optimizer (EHLO) algorithm is employed to pinpoint the most critical attributes. Subsequently, the Modified Fuzzy C-Means (MFCM) algorithm is used for efficient vehicle clustering centred on the selected attributes. These clustered vehicle characteristics are then transferred and stored within the cloud server infrastructure. The performance of the proposed methodology is evaluated in MATLAB through simulation.
This study offers a comprehensive solution to the resource allocation challenge in Vehicular Cloud Networks, addressing the burgeoning demands of modern applications while ensuring QoS, and marks a significant advancement in the field of VEC.}, } @article {pmid40108264, year = {2025}, author = {Rajavel, R and Krishnasamy, L and Nagappan, P and Moorthy, U and Easwaramoorthy, SV}, title = {Cloud-enabled e-commerce negotiation framework using bayesian-based adaptive probabilistic trust management model.}, journal = {Scientific reports}, volume = {15}, number = {1}, pages = {9457}, doi = {10.1038/s41598-025-92643-z}, pmid = {40108264}, issn = {2045-2322}, abstract = {Enforcing a trust management model in the broker-based negotiation context is identified as a foremost challenge. Creating such a trust model is not a purely technical issue; rather, the technology should enhance the cloud service negotiation framework to improve the utility value and success rate between the bargaining participants (consumer, broker, and service provider) during their negotiation progression. In the existing negotiation frameworks, trust was established using reputation, self-assessment, identity, evidence, and policy-based evaluation techniques to maximize the negotiators' (cloud participants') utility value and success rate. To further this maximization, a Bayesian-based adaptive probabilistic trust management model is enforced in the proposed broker-based trusted cloud service negotiation framework. This adaptive model dynamically ranks the service provider agents by estimating the success rate, cooperation rate and honesty rate factors to effectively measure the trustworthiness among the participants. The measured trustworthiness value will be used by the broker agents to prioritize trusted provider agents over non-trusted provider agents, which minimizes bargaining conflict between the participants and enhances future bargaining progression. In addition, the proposed adaptive probabilistic trust management model formulates the sequence of bilateral negotiation processes among the participants as a Bayesian learning process. Finally, the performance of the projected cloud-enabled e-commerce negotiation framework with the Bayesian-based adaptive probabilistic trust management model is compared with the existing frameworks by validation under different numbers of negotiation rounds.}, } @article {pmid40108031, year = {2025}, author = {Savitha, C and Talari, R}, title = {Evaluating the performance of random forest, support vector machine, gradient tree boost, and CART for improved crop-type monitoring using greenest pixel composite in Google Earth Engine.}, journal = {Environmental monitoring and assessment}, volume = {197}, number = {4}, pages = {437}, pmid = {40108031}, issn = {1573-2959}, mesh = {*Support Vector Machine ; *Environmental Monitoring/methods ; *Crops, Agricultural ; India ; *Agriculture/methods ; *Machine Learning ; *Algorithms ; Satellite Imagery ; Random Forest ; }, abstract = {The development of machine learning algorithms, along with high-resolution satellite datasets, aids in improved agriculture monitoring and mapping. Nevertheless, the use of high-resolution optical satellite datasets is usually constrained by clouds and shadows, which prevent capturing complete crop phenology, thus limiting map accuracy.
Moreover, the identification of a suitable classification algorithm is essential, as the performance of each machine learning algorithm depends on input datasets, hyperparameter tuning, training, and testing samples, among other factors. To overcome the limitation of clouds and shadows in optical data, this study employs the Sentinel-2 greenest pixel composite to generate a near-accurate crop-type map for an agricultural watershed in Tadepalligudem, India. To identify a suitable machine learning model, the study also evaluates and compares the performance of four machine learning algorithms: gradient tree boost, classification and regression tree, support vector machine, and random forest (RF). Crop-type maps are generated for two cropping seasons, Kharif and Rabi, in Google Earth Engine (GEE), a robust cloud computing platform. Further, to train and test these algorithms, ground truth data is collected and divided in a 70:30 ratio for training and testing, respectively. The results of the study demonstrated the ability of the greenest pixel composite method to identify and map crop types in small watersheds even during the Kharif season. Further, among the four machine learning algorithms employed, RF is shown to outperform the other classification algorithms in both Kharif and Rabi seasons, with an average overall accuracy of 93.21% and a kappa coefficient of 0.89. Furthermore, the study showcases the potential of the cloud computing platform GEE in enhancing automatic agricultural monitoring through satellite datasets while requiring minimal computational storage and processing.}, } @article {pmid40106239, year = {2025}, author = {Ding, X and Liu, Y and Ning, J and Chen, D}, title = {Blockchain-Enhanced Anonymous Data Sharing Scheme for 6G-Enabled Smart Healthcare With Distributed Key Generation and Policy Hiding.}, journal = {IEEE journal of biomedical and health informatics}, volume = {PP}, number = {}, pages = {}, doi = {10.1109/JBHI.2025.3550261}, pmid = {40106239}, issn = {2168-2208}, abstract = {In recent years, cloud computing has seen widespread application in 6G-enabled smart healthcare, which facilitates the sharing of medical data. Before uploading medical data to the cloud server, numerous data sharing schemes employ attribute-based encryption (ABE) to encrypt the sensitive medical data of the data owner (DO) and only provide access to data users (DU) who meet certain conditions, which can lead to privacy leakage, single points of failure, and other issues. This paper proposes a blockchain-enhanced anonymous data sharing scheme for 6G-enabled smart healthcare with distributed key generation and policy hiding, termed BADS-ABE, which achieves secure and efficient sharing of sensitive medical data. BADS-ABE designs an anonymous authentication scheme based on the Groth signature, which ensures the integrity of medical data and protects the identity privacy of the DO. Meanwhile, BADS-ABE employs smart contracts and Newton interpolation to achieve distributed key generation, which eliminates the single point of failure caused by reliance on a trusted authority (TA). Moreover, BADS-ABE achieves policy hiding and matching, which avoids the waste of decryption resources and protects the attribute privacy of the DO. Finally, security analysis demonstrates that BADS-ABE meets the security requirements of a data sharing scheme for smart healthcare.
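As a concrete illustration of the distributed key generation step just described: reconstructing a shared secret from threshold shares by polynomial interpolation can be sketched in a few lines of Python. This is a toy, Shamir-style reconstruction using Newton's divided differences over a prime field; the field size, the degree-2 polynomial, and the share points are invented for the example, and the actual BADS-ABE construction (smart contracts, ABE key material) is substantially more involved.

```python
# Toy threshold reconstruction via Newton interpolation over a prime field,
# in the spirit of the distributed key generation described for BADS-ABE.
# All parameters here are hypothetical.
P = 2**127 - 1  # a convenient Mersenne prime; the paper's field is unspecified

def inv(a: int) -> int:
    """Modular inverse via Fermat's little theorem (valid since P is prime)."""
    return pow(a, P - 2, P)

def newton_at_zero(shares):
    """Recover f(0) from t shares (x_i, y_i) of a degree t-1 polynomial,
    using the in-place divided-difference table of Newton's form."""
    xs = [x for x, _ in shares]
    coef = [y for _, y in shares]
    n = len(xs)
    for j in range(1, n):
        for i in range(n - 1, j - 1, -1):
            coef[i] = (coef[i] - coef[i - 1]) * inv(xs[i] - xs[i - j]) % P
    acc = coef[-1]              # Horner evaluation of the Newton form at x = 0
    for i in range(n - 2, -1, -1):
        acc = (acc * (0 - xs[i]) + coef[i]) % P
    return acc

# Demo: a degree-2 polynomial hides the master secret at f(0);
# any 3 shares suffice to reconstruct it.
secret, a1, a2 = 123456789, 987654321, 555555555
f = lambda x: (secret + a1 * x + a2 * x * x) % P
shares = [(x, f(x)) for x in (2, 5, 7)]
assert newton_at_zero(shares) == secret
```

Because any t shares reconstruct the key while fewer reveal nothing about it, no single authority ever needs to hold the whole key, which is what removes the single point of failure.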
Performance analysis indicates that BADS-ABE is more efficient than similar data sharing schemes.}, } @article {pmid40104062, year = {2025}, author = {Han, X and Wang, J and Wu, J and Song, J}, title = {Energy-efficient cloud systems: Virtual machine consolidation with Γ-robustness optimization.}, journal = {iScience}, volume = {28}, number = {3}, pages = {111897}, pmid = {40104062}, issn = {2589-0042}, abstract = {This study addresses the challenge of virtual machine (VM) placement in cloud computing to improve resource utilization and energy efficiency. We propose a mixed integer linear programming (MILP) model incorporating Γ-robustness theory to handle uncertainties in VM usage, optimizing both performance and energy consumption. A heuristic algorithm is developed for large-scale VM allocation. Experiments with Huawei Cloud data demonstrate significant improvements in resource utilization and energy efficiency.}, } @article {pmid40102463, year = {2025}, author = {Sarkar, C and Das, A and Jain, RK}, title = {Development of CoAP protocol for communication in mobile robotic systems using IoT technique.}, journal = {Scientific reports}, volume = {15}, number = {1}, pages = {9269}, pmid = {40102463}, issn = {2045-2322}, abstract = {This paper proposes a novel design methodology for the Constrained Application Protocol (CoAP) in an IoT-enabled mobile robot system that can be operated remotely and accessed wirelessly. These devices can be used in different applications such as monitoring, inspection, robotics, healthcare, etc. For communicating with such devices, different IoT frameworks can be deployed to attain secured transmission using protocols such as HTTP, MQTT, CoAP, etc. In this paper, IoT-enabled communication using the CoAP protocol in mobile robotic systems is attempted. A mathematical analysis of the CoAP model is carried out, showing that the protocol responds faster and consumes less power than comparable protocols. The main advantage of the CoAP protocol is that it facilitates Machine-to-Machine (M2M) communication with features such as small packet overhead and low power consumption. An experimental prototype has been developed and several trials have been conducted to evaluate the CoAP protocol's performance for rapid communication within the mobile robotic system. Signal strength analysis is also carried out. This reveals that the reliability of sending signals is up to 99%. Thus, the application of the CoAP protocol shows enough potential to develop IoT-enabled mobile robotic systems and allied applications.}, } @article {pmid40090931, year = {2025}, author = {Liu, G and Lei, J and Guo, Z and Chai, S and Ren, C}, title = {Lightweight obstacle detection for unmanned mining trucks in open-pit mines.}, journal = {Scientific reports}, volume = {15}, number = {1}, pages = {9028}, pmid = {40090931}, issn = {2045-2322}, support = {52374123//The National Natural Science Foundation of China is a funded project/ ; }, abstract = {This paper aims to solve the difficulty of balancing model size and detection accuracy in detection networks for unmanned mining trucks in open-pit mines, as well as the problem that existing models are not well suited to mining truck equipment. To address this problem, we proposed a lightweight vehicle detection algorithm model based on the improvement of YOLOv8.
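To make the M2M exchange pattern from the CoAP entry above (pmid40102463) concrete, the following minimal Python client issues a single GET against a robot resource using the third-party aiocoap library. The robot's address and resource path are invented placeholders, not values from the paper.

```python
# Minimal CoAP GET exchange, illustrating the low-overhead M2M pattern
# discussed in pmid40102463. Requires `pip install aiocoap`; the endpoint
# below is a made-up example, not the paper's test bed.
import asyncio
from aiocoap import Context, Message, GET

async def read_robot_status():
    ctx = await Context.create_client_context()     # client-side CoAP endpoint
    req = Message(code=GET, uri="coap://192.168.4.1/robot/status")
    resp = await ctx.request(req).response          # await the request/response exchange
    print(f"{resp.code}: {resp.payload.decode()}")

asyncio.run(read_robot_status())
```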
Through a series of innovative structural adjustments and optimization strategies, the model has achieved high accuracy and low complexity. This paper replaces the backbone network of YOLOv8s with the FasterNet_t0 (FN) network. This network has a simple, highly lightweight structure, which effectively reduces the model's computation and parameter count. Then the feature extraction structure of the YOLOv8 neck is replaced with the BiFPN (Bi-directional Feature Pyramid Network) structure. By increasing cross-layer connections and removing nodes with low contribution to feature fusion, the fusion and utilization of features of different scales are optimized, model performance is further improved, and the numbers of parameters and calculations are reduced. To make up for the possible loss of accuracy caused by the lightweight improvements, this paper replaces the detection head with Dynamic Head. This design introduces the self-attention mechanism along the three dimensions of scale, space, and task, significantly improving the detection accuracy of the model while avoiding additional computational burden. In terms of the loss function, this paper introduces a combination of SIoU loss and NWD (normalized Gaussian Wasserstein distance) loss. These two adjustments enable the model to cope with different scenarios more accurately; in particular, the detection of small-target mining trucks is significantly improved. In addition, this paper adopts the layer-adaptive magnitude-based pruning algorithm (LAMP) to further compress the model size while maintaining efficient detection performance. Through this pruning strategy, the model further reduces its dependence on computing resources while maintaining key performance. In the experimental part, a dataset of 3000 images was first constructed, and these images were preprocessed, including image enhancement, denoising, cropping, and scaling. The experimental environment was set up on the Autodl cloud server, using the PyTorch 2.5.1 framework and Python 3.10 environment. Through four sets of ablation experiments, we verified the specific impact of each improvement on model performance. The experimental results show that the lightweight improvement strategy significantly improves the detection accuracy of the model while greatly reducing the number of parameters and calculations. Finally, we conducted a comprehensive comparative analysis of the improved YOLOv8s model against other popular algorithms and models. The results show that our model leads in detection accuracy with 76.9%, which is more than 10% higher than the performance of similar models. At the same time, compared with other models that achieve similar accuracy levels, our model is only about 20% of their size. These results fully demonstrate that the improvement strategy we adopted is feasible and has obvious advantages in improving model efficiency.}, } @article {pmid40087343, year = {2025}, author = {Lee, H and Jun, K}, title = {Range dependent Hamiltonian algorithms for numerical QUBO formulation.}, journal = {Scientific reports}, volume = {15}, number = {1}, pages = {8819}, pmid = {40087343}, issn = {2045-2322}, support = {2020M3H3A1110365//Ministry of Science and ICT, South Korea/ ; 2020M3H3A1110365//Ministry of Science and ICT, South Korea/ ; }, abstract = {With the advent and development of quantum computers, various quantum algorithms that can solve linear equations and eigenvalue problems faster than classical computers have been developed.
In particular, a hybrid solver provided by D-Wave's Leap quantum cloud service can utilize up to two million variables. Using this technology, quadratic unconstrained binary optimization (QUBO) models have been proposed for linear systems, eigenvalue problems, RSA cryptosystems, and computed tomography (CT) image reconstructions. Generally, QUBO formulation is obtained through simple arithmetic operations, which offers great potential for future development with the progress of quantum computers. A common method here was to binarize the variables and match them to multiple qubits. To achieve the accuracy of 64 bits per variable, 64 logical qubits must be used. Finding the global minimum energy in quantum optimization becomes more difficult as more logical qubits are used; thus, a quantum parallel computing algorithm that can create and compute multiple QUBO models is introduced here. This new algorithm divides the entire domain each variable can have into multiple subranges to generate QUBO models. This paper demonstrates the superior performance of this new algorithm particularly when utilizing an algorithm for binary variables.}, } @article {pmid40085197, year = {2025}, author = {Weicken, E and Mittermaier, M and Hoeren, T and Kliesch, J and Wiegand, T and Witzenrath, M and Ballhausen, M and Karagiannidis, C and Sander, LE and Gröschel, MI}, title = {[Focus: artificial intelligence in medicine-Legal aspects of using large language models in clinical practice].}, journal = {Innere Medizin (Heidelberg, Germany)}, volume = {66}, number = {4}, pages = {436-441}, pmid = {40085197}, issn = {2731-7099}, mesh = {*Artificial Intelligence/legislation & jurisprudence/ethics ; Germany ; Humans ; *Natural Language Processing ; Computer Security/legislation & jurisprudence ; European Union ; }, abstract = {BACKGROUND: The use of artificial intelligence (AI) and natural language processing (NLP) methods in medicine, particularly large language models (LLMs), offers opportunities to advance the healthcare system and patient care in Germany. LLMs have recently gained importance, but their practical application in hospitals and practices has so far been limited. Research and implementation are hampered by a complex legal situation. It is essential to research LLMs in clinical studies in Germany and to develop guidelines for users.
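The range-splitting binary encoding described in the QUBO entry above (pmid40087343) can be illustrated classically. To solve a*x = c, each candidate subrange [lo, lo+width) gets its own small QUBO over n bits encoding x = lo + step * sum(2^k * b_k) with energy (a*x - c)^2; the sketch below brute-forces each model instead of sending it to an annealer, and the equation, bit width, and subranges are invented for the example.

```python
# Toy range-splitting QUBO construction in the spirit of pmid40087343:
# one QUBO per subrange, solved here by classical brute force rather
# than on D-Wave hardware. Example problem and parameters are made up.
from itertools import product

def build_qubo(a, c, lo, width, n_bits):
    step = width / 2**n_bits
    w = [a * step * 2**k for k in range(n_bits)]      # weight of each bit
    r = a * lo - c                                    # constant residual
    Q = {(k, k): w[k] * w[k] + 2 * r * w[k] for k in range(n_bits)}  # b_k^2 == b_k
    for k in range(n_bits):
        for l in range(k + 1, n_bits):
            Q[(k, l)] = 2 * w[k] * w[l]
    return Q, r * r, step

def energy(Q, bits, offset):
    return sum(v * bits[k] * bits[l] for (k, l), v in Q.items()) + offset

# Solve 2x = 13 over [0, 16), split into four 4-bit subranges.
a, c, n = 2.0, 13.0, 4
results = []
for lo in (0.0, 4.0, 8.0, 12.0):
    Q, off, step = build_qubo(a, c, lo, 4.0, n)
    bits = min(product((0, 1), repeat=n), key=lambda b: energy(Q, b, off))
    x = lo + step * sum(2**k * b for k, b in enumerate(bits))
    results.append((energy(Q, bits, off), x))
print(min(results))   # -> (0.0, 6.5): the subrange containing the root wins
```

Splitting the domain keeps each model small, which mirrors the paper's motivation: fewer logical qubits per model makes the global minimum easier to find.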

OBJECTIVE: How can foundations for the data protection-compliant use of LLMs, particularly cloud-based LLMs, be established in the German healthcare system? The aim of this work is to present the data protection aspects of using cloud-based LLMs in clinical research and patient care in Germany and the European Union (EU); to this end, key statements of a legal opinion on this matter are considered. Insofar as the requirements for use are regulated by state laws (vs. federal laws), the legal situation in Berlin is used as a basis.

MATERIALS AND METHODS: As part of a research project, a legal opinion was commissioned to clarify the data protection aspects of the use of LLMs with cloud-based solutions at the Charité - University Hospital Berlin, Germany. Specific questions regarding the processing of personal data were examined.

RESULTS: The legal framework varies depending on the type of data processing and the relevant federal state (Bundesland). For anonymous data, data protection requirements need not apply. Where personal data is processed, it should be pseudonymized if possible. In the research context, patient consent is usually required to process their personal data, and data processing agreements must be concluded with the providers. Recommendations originating from LLMs must always be reviewed by medical doctors.
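The pseudonymization recommendation above has a simple technical core that is worth making concrete. One common building block is keyed hashing of direct identifiers before any record leaves the hospital for a cloud-hosted LLM; the sketch below uses only Python's standard library, and the field names, secret, and record are invented. Real deployments additionally need free-text de-identification, date handling, and proper key custody.

```python
# Minimal keyed-hash pseudonymization of direct identifiers before cloud
# LLM processing, as recommended above. HMAC keeps pseudonyms stable
# across records without revealing identity. All names/values are made up.
import hmac, hashlib

PSEUDONYM_KEY = b"replace-with-a-managed-secret"   # belongs in a key store, not in code

def pseudonym(value: str) -> str:
    return hmac.new(PSEUDONYM_KEY, value.encode(), hashlib.sha256).hexdigest()[:16]

def pseudonymize(record: dict, id_fields=("name", "insurance_id")) -> dict:
    out = dict(record)
    for field in id_fields:
        if field in out:
            out[field] = pseudonym(str(out[field]))
    return out

record = {"name": "Erika Mustermann", "insurance_id": "A123456789",
          "finding": "suspected community-acquired pneumonia"}
print(pseudonymize(record))   # identifiers replaced; clinical text untouched
```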

CONCLUSIONS: The use of cloud-based LLMs is possible as long as data protection requirements are observed. The legal framework is complex and requires transparency from providers. Future developments could increase the potential of AI and particularly LLMs in everyday clinical practice; however, clear legal and ethical guidelines are necessary.}, } @article {pmid40082502, year = {2025}, author = {Lv, F}, title = {Research on optimization strategies of university ideological and political parenting models under the empowerment of digital intelligence.}, journal = {Scientific reports}, volume = {15}, number = {1}, pages = {8680}, pmid = {40082502}, issn = {2045-2322}, abstract = {The development of big data, artificial intelligence, cloud computing, and other new generations of intelligent technologies has triggered digital changes in university civic education's resources, forms, and modes. It has become a new engine to promote the innovation and development of the civic education model. A university civic and political education model enabled by digital and intelligent technology can carry the concept of innovation through the subjects, content, processes, and settings of education and promote the development of the ideological and political parenting model in the direction of refinement, specialization, and conscientization. Based on the differential game model, this paper comprehensively considers the model characteristics of universities, enterprises, and governments and their intertemporal characteristics of collaborative parenting and innovation behaviors. It constructs no-incentive, cost-sharing, and collaborative cooperation models and obtains the optimal trajectories for the degree of effort, the subsidy coefficient, the optimal benefit function, and the digital and intelligent technology stock. The conclusions are as follows: (1) resource input cost and technological innovation cost are the key driving variables of university ideological and political parenting; (2) the government's cost subsidy improves the degree of innovation effort of universities and enterprises, and thus achieves Pareto optimality for the three parties; (3) the degree of innovation effort, overall benefit, and technology level of the three parties in the synergistic cooperation model are better than those of the other two models. Finally, the validity of the model is verified through numerical simulation analysis. An in-depth discussion of the digital intelligence-enabled ideological and political parenting model is necessary for the high-quality development of education and helps make ideological and political parenting in the digital age more scientific and practical.}, } @article {pmid40081211, year = {2025}, author = {Alsharabi, N and Alayba, A and Alshammari, G and Alsaffar, M and Jadi, A}, title = {An end-to-end four tier remote healthcare monitoring framework using edge-cloud computing and redactable blockchain.}, journal = {Computers in biology and medicine}, volume = {189}, number = {}, pages = {109987}, doi = {10.1016/j.compbiomed.2025.109987}, pmid = {40081211}, issn = {1879-0534}, mesh = {*Cloud Computing ; Humans ; *Blockchain ; Telemedicine ; Internet of Things ; Wireless Technology ; Computer Security ; Monitoring, Physiologic/instrumentation/methods ; Wearable Electronic Devices ; }, abstract = {The Medical Internet of Things (MIoTs) encompasses compact, energy-efficient wireless sensor devices designed to monitor patients' physiological parameters.
Healthcare networks provide constant data monitoring, enabling patients to live independently. Despite advancements in MIoTs, critical issues persist that can affect the Quality of Service (QoS) in the network. The wearable IoT module collects data and stores it on cloud servers, making it vulnerable to privacy breaches and attacks by unauthorized users. To address these challenges, we propose an end-to-end secure remote healthcare framework called the Four Tier Remote Healthcare Monitoring Framework (FTRHMF). This framework comprises multiple entities, including Wireless Body Sensors (WBS), Distributed Gateway (DGW), Distributed Edge Server (DES), Blockchain Server (BS), and Cloud Server (CS). The framework operates in four tiers. In the first tier, WBS and DGW are authenticated to the BS using secret credentials, ensuring privacy and security for all entities. In the second tier, authenticated WBS transmit data to the DGW via a two-level Hybridized Metaheuristic Secure Federated Clustered Routing Protocol (HyMSFCRP), which leverages Mountaineering Team-Based Optimization (MTBO) and Sea Horse Optimization (SHO) algorithms. In the third tier, sensor reports are prioritized and analyzed using Multi-Agent Deep Reinforcement Learning (MA-DRL), with the results fed into the Hybrid-Transformer Deep Learning (HTDL) model. This model combines Lite Convolutional Neural Network and Swin Transformer networks to detect patient outcomes accurately. Finally, in the fourth tier, patients' outcomes are securely stored in a cloud-assisted redactable blockchain layer, allowing modifications without compromising the integrity of the original data. This framework enhances the network lifetime by 18.3 %, reduces transmission delays by 15.6 %, and improves classification accuracy by 7.4 %, with a PSNR of 46.12 dB, an SSIM of 0.8894, and an MAE of 22.51 when compared to existing works.}, } @article {pmid40081084, year = {2025}, author = {Alsaleh, A}, title = {Toward a conceptual model to improve the user experience of a sustainable and secure intelligent transport system.}, journal = {Acta psychologica}, volume = {255}, number = {}, pages = {104892}, doi = {10.1016/j.actpsy.2025.104892}, pmid = {40081084}, issn = {1873-6297}, mesh = {Humans ; *Automobile Driving ; *Transportation/methods ; *Models, Theoretical ; Cloud Computing ; *Artificial Intelligence ; }, abstract = {The rapid advancement of automotive technologies has spurred the development of innovative applications within intelligent transportation systems (ITS), aimed at enhancing safety, efficiency and sustainability. These applications, such as advanced driver assistance systems (ADAS), vehicle-to-everything (V2X) communication and autonomous driving, are transforming transportation by enabling adaptive cruise control, lane-keeping assistance, real-time traffic management and predictive maintenance. By leveraging cloud computing and vehicular networks, intelligent transportation solutions optimize traffic flow, improve emergency response systems, and forecast potential collisions, contributing to safer and more efficient roads. This study proposes a Vehicular Cloud-based Intelligent Transportation System (VCITS) model, integrating vehicle-to-vehicle (V2V) and vehicle-to-infrastructure (V2I) communication through roadside units (RSUs) and cloudlets to provide real-time access to cloud resources.
A novel search and management protocol, supported by a tailored algorithm, was developed to enhance resource allocation success rates for vehicles within a defined area of interest. The study also identifies critical security vulnerabilities in smart vehicle networks, emphasizing the need for robust solutions to protect data integrity and privacy. The simulation experiments evaluated the VCITS model under various traffic densities and resource request scenarios. Results demonstrated that the proposed model effectively maintained service availability rates exceeding 85 % even under high demand. Furthermore, the system exhibited scalability and stability, with minimal service loss and efficient handling of control messages. These findings highlight the potential of the VCITS model to advance smart traffic management while addressing computational efficiency and security challenges. Future research directions include integrating cybersecurity measures and leveraging emerging technologies like 5G and 6G to further enhance system performance and safety.}, } @article {pmid40075100, year = {2025}, author = {Zinchenko, A and Fernandez-Gamiz, U and Redchyts, D and Gorna, O and Bilousova, T}, title = {An efficient parallelization technique for the coupled problems of fluid, gas and plasma mechanics in the grid environment.}, journal = {Scientific reports}, volume = {15}, number = {1}, pages = {8629}, doi = {10.1038/s41598-025-91695-5}, pmid = {40075100}, issn = {2045-2322}, support = {ELKARTEK24/78 KK-2024/00117//Ekonomiaren Garapen eta Lehiakortasun Saila, Eusko Jaurlaritza/ ; }, abstract = {The development of efficient parallelization strategies for numerical simulation methods of fluid, gas and plasma mechanics remains one of the key technology challenges in modern scientific computing. The numerical models of gas and plasma dynamics based on the Navier-Stokes and electrodynamics equations require enormous computational efforts. For such cases, the use of parallel and distributed computing proved to be effective. The Grid computing environment could provide virtually unlimited computational resources and data storage, convenient task launch and monitoring tools, graphical user interfaces such as web portals and visualization systems. However, the deployment of traditional CFD solvers in the Grid environment remains very limited because basically it requires the cluster computing architecture. This study explores the applicability of distributed computing and Grid technologies for solving the weak-coupled problems of fluid, gas and plasma mechanics, including techniques of flow separation control like using plasma actuators to influence boundary layer structure. The adaptation techniques for the algorithms of coupled computational fluid dynamics and electrodynamics problems for distributed computations on grid and cloud infrastructure are presented. A parallel solver suitable for the Grid infrastructure has been developed and the test calculations in the distributed computing environment are performed. The simulation results for partially ionized separated flow behind the circular cylinder are analysed. Discussion includes some performance metrics and parallelization effectiveness estimation. 
The potential of the Grid infrastructure to provide a powerful and flexible computing environment for fast and efficient solution of weak-coupled problems of fluid, gas and plasma mechanics has been shown.}, } @article {pmid40074765, year = {2025}, author = {Puchala, S and Muchnik, E and Ralescu, A and Hartings, JA}, title = {Automated detection of spreading depolarizations in electrocorticography.}, journal = {Scientific reports}, volume = {15}, number = {1}, pages = {8556}, pmid = {40074765}, issn = {2045-2322}, support = {W81XWH-16-2-0020//Defense Medical Research and Development Program/ ; }, mesh = {Humans ; *Electrocorticography/methods ; Male ; Algorithms ; Female ; Cortical Spreading Depression/physiology ; Machine Learning ; Adult ; Middle Aged ; Cerebral Cortex/physiopathology/physiology ; }, abstract = {Spreading depolarizations (SD) in the cerebral cortex are a novel mechanism of lesion development and worse outcomes after acute brain injury, but accurate diagnosis by neurophysiology is a barrier to more widespread application in neurocritical care. Here we developed an automated method for SD detection by training machine-learning models on electrocorticography data from a 14-patient cohort that included 1,548 examples of SD direct-current waveforms as identified in expert manual scoring. As determined by leave-one-patient-out cross-validation, optimal performance was achieved with a gradient-boosting model using 30 features computed from 400-s electrocorticography segments sampled at 0.1 Hz. This model was applied to continuous electrocorticography data by generating a time series of SD probability [PSD(t)], and threshold PSD(t) values to trigger SD predictions were determined empirically. The developed algorithm was then tested on a novel dataset of 10 patients, resulting in 1,252 true positive detections (/1,953; 64% sensitivity) and 323 false positives (6.5/day). Secondary manual review of false positives showed that a majority (224, or 69%) were likely real SDs, highlighting the conservative nature of expert scoring and the utility of automation. SD detection using sparse sampling (0.1 Hz) is optimal for streaming and use in cloud computing applications for neurocritical care.}, } @article {pmid40070808, year = {2025}, author = {Krishna, K}, title = {Advancements in cache management: a review of machine learning innovations for enhanced performance and security.}, journal = {Frontiers in artificial intelligence}, volume = {8}, number = {}, pages = {1441250}, pmid = {40070808}, issn = {2624-8212}, abstract = {Machine learning techniques have emerged as a promising tool for efficient cache management, helping optimize cache performance and fortify against security threats. The range of machine learning is vast, from reinforcement learning-based cache replacement policies to Long Short-Term Memory (LSTM) models predicting content characteristics for caching decisions. Diverse techniques such as imitation learning, reinforcement learning, and neural networks are extensively useful in cache-based attack detection, dynamic cache management, and content caching in edge networks. The versatility of machine learning techniques enables them to tackle various cache management challenges, from adapting to workload characteristics to improving cache hit rates in content delivery networks. 
A comprehensive review of various machine learning approaches for cache management is presented, helping the community understand how machine learning is used to solve practical challenges in cache management. It covers reinforcement learning-, deep learning-, and imitation learning-driven cache replacement in hardware caches. Information on content caching strategies and dynamic cache management using various machine learning techniques in cloud and edge computing environments is also presented. Machine learning-driven methods to mitigate security threats in cache management are discussed as well.}, } @article {pmid40069277, year = {2025}, author = {Alyas, T and Abbas, Q and Niazi, S and Alqahtany, SS and Alghamdi, T and Alzahrani, A and Tabassum, N and Ibrahim, AM}, title = {Multi blockchain architecture for judicial case management using smart contracts.}, journal = {Scientific reports}, volume = {15}, number = {1}, pages = {8471}, pmid = {40069277}, issn = {2045-2322}, abstract = {The infusion of technology across various domains, particularly in process-centric and multi-stakeholder sectors, demands transparency, accuracy, and scalability. This paper introduces a blockchain and smart contract-based framework for judicial case management, proposing a private-to-public blockchain approach to establish a transparent, decentralized, and robust system. The paper presents a multi-blockchain structure for managing judicial cases based on smart contracts, ultimately rendering cases more transparent, distributed, and resilient. This solution is innovative because it leverages both private and public blockchains to satisfy the unique requirements of judicial processes, with transparent public access for authorized digital events and transactions occurring on the freely available blockchain and a three-tiered private blockchain structure to address private stakeholder interactions while ensuring that operational consistency, security, and data privacy requirements are met. Leveraging the decentralized and tamper-proof approach of blockchain and cloud computing, the framework aims to increase data security and cut down on administrative burdens. This framework offers a scalable and secure solution for modernizing judicial systems, supporting smart governance's shift towards digital transparency and accountability.}, } @article {pmid40065898, year = {2025}, author = {Bedia, SV and Shapurwala, MA and Kharge, BP and Bedia, AS and Patil, A}, title = {A Comprehensive Guide to Implement Artificial Intelligence Cloud Solutions in a Dental Clinic: A Review.}, journal = {Cureus}, volume = {17}, number = {2}, pages = {e78718}, pmid = {40065898}, issn = {2168-8184}, abstract = {Integrating the artificial intelligence (AI) cloud into dental clinics can enhance diagnostics, streamline operations, and improve patient care. This article explores the adoption of AI-powered cloud solutions in dental clinics, focusing on infrastructure requirements, software licensing, staff training, system optimization, and the challenges faced during implementation. It provides a detailed guide for dental practices to transition to AI cloud systems. We reviewed existing literature, technological guidelines, and practical implementation strategies for integrating AI cloud in dental practices.
The methodology includes a step-by-step approach to understanding clinic needs, selecting appropriate software, training staff, and ensuring system optimization and maintenance. Integrating AI cloud solutions can drastically improve clinical outcomes and operational efficiency. Despite the challenges, proper planning, infrastructure investment, and continuous training can ensure a smooth transition and maximize the benefits of AI technologies in dental care.}, } @article {pmid40062269, year = {2025}, author = {Alshardan, A and Mahgoub, H and Alahmari, S and Alonazi, M and Marzouk, R and Mohamed, A}, title = {Cloud-to-Thing continuum-based sports monitoring system using machine learning and deep learning model.}, journal = {PeerJ. Computer science}, volume = {11}, number = {}, pages = {e2539}, pmid = {40062269}, issn = {2376-5992}, abstract = {Sports monitoring and analysis have seen significant advancements by integrating cloud computing and continuum paradigms facilitated by machine learning and deep learning techniques. This study presents a novel approach for sports monitoring, specifically focusing on basketball, that seamlessly transitions from traditional cloud-based architectures to a continuum paradigm, enabling real-time analysis and insights into player performance and team dynamics. Leveraging machine learning and deep learning algorithms, our framework offers enhanced capabilities for player tracking, action recognition, and performance evaluation in various sports scenarios. The proposed Cloud-to-Thing continuum-based sports monitoring system utilizes advanced techniques such as Improved Mask R-CNN for pose estimation and a hybrid metaheuristic algorithm combined with a generative adversarial network (GAN) for classification. Our system significantly improves latency and accuracy, reducing latency to 5.1 ms and achieving an accuracy of 94.25%, which outperforms existing methods in the literature. These results highlight the system's ability to provide real-time, precise, and scalable sports monitoring, enabling immediate feedback for time-sensitive applications. This research has significantly improved real-time sports event analysis, contributing to improved player performance evaluation, enhanced team strategies, and informed tactical adjustments.}, } @article {pmid40062251, year = {2025}, author = {Rajagopal, D and Subramanian, PKT}, title = {AI augmented edge and fog computing for Internet of Health Things (IoHT).}, journal = {PeerJ. Computer science}, volume = {11}, number = {}, pages = {e2431}, pmid = {40062251}, issn = {2376-5992}, abstract = {Patients today seek a more advanced and personalized health-care system that keeps up with the pace of modern living. Cloud computing delivers resources over the Internet and enables the deployment of an infinite number of applications to provide services to many sectors. The primary limitation of these cloud frameworks right now is their limited scalability, which results in their inability to meet needs. An edge/fog computing environment, paired with current computing techniques, is the answer to fulfill the energy efficiency and latency requirements for the real-time collection and analysis of health data. Additionally, the Internet of Things (IoT) revolution has been essential in changing contemporary healthcare systems by integrating social, economic, and technological perspectives. 
This requires transitioning from conventional healthcare systems to more adaptive healthcare systems that allow patients to be identified, managed, and evaluated more easily. These techniques allow data from many sources to be integrated to effectively assess patient health status and predict potential preventive actions. A subset of the Internet of Things, the Internet of Health Things (IoHT) enables the remote exchange of data for physical processes like patient monitoring, treatment progress, observation, and consultation. Previous surveys related to healthcare mainly focused on architecture and networking, leaving untouched important aspects of smart systems such as optimal computing techniques (artificial intelligence and deep learning) and advanced technologies and services that include 5G and unified communication as a service (UCaaS). This study aims to examine future and existing fog and edge computing architectures and methods that have been augmented with artificial intelligence (AI) for use in healthcare applications, as well as defining the demands and challenges of incorporating fog and edge computing technology in IoHT, thereby helping healthcare professionals and technicians identify the relevant technologies required based on their needs for developing IoHT frameworks for remote healthcare. Among the crucial elements to take into account in an IoHT framework are efficient resource management, low latency, and strong security. This review addresses several machine learning techniques for efficient resource management in the IoT, where machine learning (ML) and AI are crucial. It has been noted how the use of modern technologies, such as narrow band-IoT (NB-IoT) for wider coverage and Blockchain technology for security, is transforming IoHT. The last part of the review focuses on the future challenges posed by advanced technologies and services. This study provides prospective research suggestions for enhancing edge and fog computing services for healthcare with modern technologies in order to provide patients with an improved quality of life.}, } @article {pmid40053761, year = {2025}, author = {Parciak, M and Pierlet, N and Peeters, LM}, title = {Empowering Health Care Actors to Contribute to the Implementation of Health Data Integration Platforms: Retrospective of the medEmotion Project.}, journal = {Journal of medical Internet research}, volume = {27}, number = {}, pages = {e68083}, pmid = {40053761}, issn = {1438-8871}, mesh = {Humans ; *Empowerment ; Belgium ; Retrospective Studies ; Delivery of Health Care ; Stakeholder Participation ; Cooperative Behavior ; }, abstract = {Health data integration platforms are vital to drive collaborative, interdisciplinary medical research projects. Developing such a platform requires input from different stakeholders. Managing these stakeholders and steering platform development is challenging, and misaligning the platform to the partners' strategies might lead to low acceptance of the final platform. We present the medEmotion project, a collaborative effort among 7 partners from health care, academia, and industry to develop a health data integration platform for the region of Limburg in Belgium. We focus on the development process and stakeholder engagement, aiming to give practical advice for similar future efforts based on our reflections on medEmotion. We introduce Personas to paraphrase different roles that stakeholders take and Demonstrators that summarize personas' requirements with respect to the platform.
Both the personas and the demonstrators serve two purposes. First, they are used to define technical requirements for the medEmotion platform. Second, they represent a communication vehicle that simplifies discussions among all stakeholders. Based on the personas and demonstrators, we present the medEmotion platform, built on components from the Microsoft Azure cloud. The demonstrators are based on real-world use cases and showcase the utility of the platform. We reflect on the development process of medEmotion and distill takeaway messages that will be helpful for future projects. Investing in community building, stakeholder engagement, and education is vital to building an ecosystem for a health data integration platform. Rather than having projects led by academics, health care providers themselves should ideally drive collaboration among providers. The providers are best positioned to address hospital-specific requirements, while academics take a neutral mediator role. This also includes the ideation phase, where it is vital to ensure the involvement of all stakeholders. Finally, balancing innovation with implementation is key to developing an innovative yet sustainable health data integration platform.}, } @article {pmid40050991, year = {2025}, author = {Lee, H and Kim, W and Kwon, N and Kim, C and Kim, S and An, JY}, title = {Lessons from national biobank projects utilizing whole-genome sequencing for population-scale genomics.}, journal = {Genomics & informatics}, volume = {23}, number = {1}, pages = {8}, pmid = {40050991}, issn = {1598-866X}, abstract = {Large-scale national biobank projects utilizing whole-genome sequencing have emerged as transformative resources for understanding human genetic variation and its relationship to health and disease. These initiatives, which include the UK Biobank, All of Us Research Program, Singapore's PRECISE, Biobank Japan, and the National Project of Bio-Big Data of Korea, are generating unprecedented volumes of high-resolution genomic data integrated with comprehensive phenotypic, environmental, and clinical information. This review examines the methodologies, contributions, and challenges of major WGS-based national genome projects worldwide. We first discuss the landscape of national biobank initiatives, highlighting their distinct approaches to data collection, participant recruitment, and phenotype characterization. We then introduce recent technological advances that enable efficient processing and analysis of large-scale WGS data, including improvements in variant calling algorithms, innovative methods for creating multi-sample VCFs, optimized data storage formats, and cloud-based computing solutions. The review synthesizes key discoveries from these projects, particularly in identifying expression quantitative trait loci and rare variants associated with complex diseases. Our review introduces the latest findings from the National Project of Bio-Big Data of Korea, which has advanced our understanding of population-specific genetic variation and rare diseases in Korean and East Asian populations. Finally, we discuss future directions and challenges in maximizing the impact of these resources on precision medicine and global health equity.
This comprehensive examination demonstrates how large-scale national genome projects are revolutionizing genetic research and healthcare delivery while highlighting the importance of continued investment in diverse, population-specific genomic resources.}, } @article {pmid40046768, year = {2025}, author = {Zhang, G}, title = {Cloud computing convergence: integrating computer applications and information management for enhanced efficiency.}, journal = {Frontiers in big data}, volume = {8}, number = {}, pages = {1508087}, pmid = {40046768}, issn = {2624-909X}, abstract = {This study examines the transformative impact of cloud computing on the integration of computer applications and information management systems to improve operational efficiency. Grounded in a robust methodological framework, the research employs experimental testing and comparative data analysis to assess the performance of an information management system within a cloud computing environment. Data was meticulously collected and analyzed, highlighting a threshold where user demand surpasses 400, leading to a stabilization in CPU utilization at an optimal level and maintaining subsystem response times consistently below 5 s. This comprehensive evaluation underscores the significant advantages of cloud computing, demonstrating its capacity to optimize the synergy between computer applications and information management. The findings not only contribute to theoretical advancements in the field but also offer actionable insights for organizations seeking to enhance efficiency through effective cloud-based solutions.}, } @article {pmid40046689, year = {2025}, author = {Saeedbakhsh, S and Mohammadi, M and Younesi, S and Sattari, M}, title = {Using Internet of Things for Child Care: A Systematic Review.}, journal = {International journal of preventive medicine}, volume = {16}, number = {}, pages = {3}, pmid = {40046689}, issn = {2008-7802}, abstract = {BACKGROUND: In smart cities, prioritizing child safety through affordable technology like the Internet of Things (IoT) is crucial for parents. This study seeks to investigate different IoT tools that can prevent and address accidents involving children. The goal is to alleviate the emotional and financial toll of such incidents due to their high mortality rates.

METHODS: This study considers articles published in English that use IoT for children's healthcare. PubMed, Science Direct, and Web of Science were used as the search databases. A total of 273 studies were retrieved after the initial search. After eliminating duplicate records, studies were assessed against inclusion and exclusion criteria. Titles and abstracts were reviewed for relevance. Articles not meeting the criteria were excluded. Finally, 29 studies met the criteria for inclusion in this review.

RESULTS: The study reveals that India is at the forefront of IoT research for children, followed by Italy and China. Studies mainly occur indoors, utilizing wearable sensors like heart rate, motion, and tracking sensors. Biosignal sensors and technologies such as Zigbee and image recognition are commonly used for data collection and analysis. Diverse approaches, including cloud computing and machine vision, are applied in this innovative field.

CONCLUSIONS: In conclusion, IoT for children is mainly seen in countries such as India, Italy, and China. Studies focus on indoor use, using wearable sensors for heart rate monitoring. Biosignal sensors and various technologies like Zigbee, Kinect, image recognition, RFID, and robots contribute to enhancing children's well-being.}, } @article {pmid40041774, year = {2025}, author = {Efendi, A and Ammarullah, MI and Isa, IGT and Sari, MP and Izza, JN and Nugroho, YS and Nasrullah, H and Alfian, D}, title = {IoT-Based Elderly Health Monitoring System Using Firebase Cloud Computing.}, journal = {Health science reports}, volume = {8}, number = {3}, pages = {e70498}, pmid = {40041774}, issn = {2398-8835}, abstract = {BACKGROUND AND AIMS: The increasing elderly population presents significant challenges for healthcare systems, necessitating innovative solutions for continuous health monitoring. This study develops and validates an IoT-based elderly monitoring system designed to enhance the quality of life for elderly people. The system features a robust Android-based user interface integrated with the Firebase cloud platform, ensuring real-time data collection and analysis. In addition, supervised machine learning is implemented to predict whether the observed user is in a "stable" or "not stable" condition based on real-time parameters.

METHODS: The system architecture adopts the IoT layered model, including the physical layer, network layer, and application layer. Device validation involved six participants whose real-time heart-rate, oxygen saturation, and body temperature data were measured and then analysed using the mean absolute percentage error (MAPE) to quantify the error rate. A comparative experiment was conducted to determine the optimal supervised machine learning model to deploy in the system by analysing evaluation metrics. Meanwhile, the user satisfaction aspect was evaluated in terms of usability, comfort, security, and effectiveness.

RESULTS: The IoT-based elderly health monitoring system was constructed with an overall MAPE of 0.90% across the parameters: heart-rate (1.68%), oxygen saturation (0.57%), and body temperature (0.44%). The machine learning experiment indicates that the XGBoost model has the optimal performance, with an accuracy of 0.973 and an F1 score of 0.970. User satisfaction, assessed in terms of usability, comfort, security, and effectiveness, achieved a high rating of 86.55%.
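Since the validation above reports per-parameter MAPE values, a short sketch of how such an error rate is typically computed from paired device/reference readings may be useful; the sample readings below are invented, not the study's data.

```python
# How a per-parameter MAPE like those reported above is typically computed
# from paired (device, reference) readings. Sample values are invented.
def mape(device, reference):
    """Mean absolute percentage error, in percent."""
    return 100.0 * sum(abs(d - r) / r for d, r in zip(device, reference)) / len(device)

heart_rate = [(72, 71), (80, 81), (65, 66)]      # (device, reference) pairs
spo2       = [(97.0, 97.5), (96.0, 96.2)]
for name, pairs in (("heart-rate", heart_rate), ("SpO2", spo2)):
    d, r = zip(*pairs)
    print(f"{name}: MAPE = {mape(d, r):.2f}%")
```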

CONCLUSION: This system offers practical applications for both elderly users and caregivers, enabling real-time monitoring of health conditions. Future enhancements may include integration with artificial intelligence technologies such as machine learning and deep learning to predict health conditions from data patterns, further improving the system's capabilities and effectiveness in elderly care.}, } @article {pmid40039777, year = {2024}, author = {Duan, S and Yong, R and Yuan, H and Cai, T and Huang, K and Hoettges, K and Lim, EG and Song, P}, title = {Automated Offline Smartphone-Assisted Microfluidic Paper-Based Analytical Device for Biomarker Detection of Alzheimer's Disease.}, journal = {Annual International Conference of the IEEE Engineering in Medicine and Biology Society. IEEE Engineering in Medicine and Biology Society. Annual International Conference}, volume = {2024}, number = {}, pages = {1-5}, doi = {10.1109/EMBC53108.2024.10781517}, pmid = {40039777}, issn = {2694-0604}, mesh = {*Alzheimer Disease/diagnosis ; *Smartphone ; Humans ; *Biomarkers/blood ; *Amyloid beta-Peptides/blood ; *Enzyme-Linked Immunosorbent Assay ; Paper ; Peptide Fragments/blood/analysis ; Microfluidic Analytical Techniques/instrumentation ; Colorimetry/instrumentation/methods ; Deep Learning ; }, abstract = {This paper presents a smartphone-assisted microfluidic paper-based analytical device (μPAD), which was applied to detect Alzheimer's disease biomarkers, especially in resource-limited regions. This device implements deep learning (DL)-assisted offline smartphone detection, eliminating the requirement for large computing devices and cloud computing power. In addition, a smartphone-controlled rotary valve enables a fully automated colorimetric enzyme-linked immunosorbent assay (c-ELISA) on μPADs. It reduces detection errors caused by human operation and further increases the accuracy of μPAD c-ELISA. We realized a sandwich c-ELISA targeting β-amyloid peptide 1-42 (Aβ 1-42) in artificial plasma, and our device provided a detection limit of 15.07 pg/mL. We collected 750 images for the training of the DL YOLOv5 model. The training accuracy is 88.5%, which is 11.83% higher than the traditional curve-fitting result analysis method. Utilizing the YOLOv5 model with the NCNN framework facilitated offline detection directly on the smartphone. Furthermore, we developed a smartphone application to operate the experimental process, realizing user-friendly rapid sample detection.}, } @article {pmid40039036, year = {2024}, author = {Delannes-Molka, D and Jackson, KL and King, E and Duric, Z}, title = {Towards Markerless Motion Estimation of Human Functional Upper Extremity Movement.}, journal = {Annual International Conference of the IEEE Engineering in Medicine and Biology Society. IEEE Engineering in Medicine and Biology Society. Annual International Conference}, volume = {2024}, number = {}, pages = {1-7}, doi = {10.1109/EMBC53108.2024.10782591}, pmid = {40039036}, issn = {2694-0604}, mesh = {Humans ; *Upper Extremity/physiology ; *Movement/physiology ; Biomechanical Phenomena ; Algorithms ; }, abstract = {Markerless motion capture of human movement is a potentially useful approach for providing movement scientists and rehabilitation specialists with a portable and low-cost method for measuring functional upper extremity movement. This is in contrast with optical and inertial motion capture systems, which often require specialized equipment and expertise to use. 
Existing methods for markerless motion capture have focused on inferring 2D or 3D keypoints on the body and estimating volumetric representations, both using RGB-D data. The keypoints and volumes are then used to compute quantities like joint angles and velocity magnitude over time. However, these methods do not have sufficient accuracy to capture fine human motions and, as a result, have largely been restricted to capturing gross movements and rehabilitation games. Furthermore, most of these methods have not used depth images to estimate motion directly. This work proposes using the depth images from an RGB-D camera to compute the upper extremity motion directly by segmenting the upper extremity into components of a kinematic chain, estimating the motion of the rigid portions (i.e., the upper and lower arm) using ICP or Distance Transform across sequential frames, and computing the motion of the end-effector (e.g., wrist) relative to the torso. Methods with data from both the Microsoft Azure Kinect Camera and 9-camera OptiTrack Motive motion capture system (Mocap) were compared. Point Cloud methods performed comparably to Mocap on tracking rotation and velocity of a human arm and could be an affordable alternative to Mocap in the future. While the methods were tested on gross motions, future work would include refining and evaluating these methods for fine motion.}, } @article {pmid40038515, year = {2025}, author = {Alshemaimri, B and Badshah, A and Daud, A and Bukhari, A and Alsini, R and Alghushairy, O}, title = {Regional computing approach for educational big data.}, journal = {Scientific reports}, volume = {15}, number = {1}, pages = {7619}, pmid = {40038515}, issn = {2045-2322}, mesh = {*Big Data ; Humans ; *Cloud Computing ; Educational Technology/methods ; }, abstract = {The educational landscape is witnessing a transformation with the integration of Educational Technology (Edutech). As educational institutions adopt digital platforms and tools, the generation of Educational Big Data (EBD) has significantly increased. Research indicates that educational institutions produce massive amounts of data, including student enrollment records, academic performance metrics, attendance records, learning activities, and interactions within digital learning environments. This influx of data needs efficient processing to derive actionable insights and enhance the learning experience. Real-time data processing plays a critical part in educational environments in supporting various functions such as personalized learning, adaptive assessment, and administrative decision-making. However, there may be challenges in sending large amounts of educational data to cloud servers, such as latency, cost, and network congestion. These challenges make it more difficult to provide educators and students with timely insights and services, which reduces the efficiency of educational activities. This paper proposes a Regional Computing (RC) paradigm designed specifically for big data management in education to address these issues. In this case, RC is established within educational regions and intended to decentralize data processing. To reduce dependency on cloud infrastructure, these regional servers are strategically located to collect, process, and store big data related to education regionally. Our investigation results show that RC significantly reduces latency to 203.11 ms for 2,000 devices, compared to 707.1 ms in Cloud Computing (CC).
It is also more cost-efficient, with a total cost of just 1.14 USD versus 5.36 USD in the cloud. Furthermore, it avoids the 600% congestion surges seen in cloud setups and maintains consistent throughput under high workloads, establishing RC as the optimal solution for managing EBD.}, } @article {pmid40027081, year = {2025}, author = {Verdet, A and Hamdaqa, M and Silva, LD and Khomh, F}, title = {Assessing the adoption of security policies by developers in terraform across different cloud providers.}, journal = {Empirical software engineering}, volume = {30}, number = {3}, pages = {74}, doi = {10.1007/s10664-024-10610-0}, pmid = {40027081}, issn = {1573-7616}, abstract = {Cloud computing has become popular thanks to the widespread use of Infrastructure as Code (IaC) tools, allowing the community to manage and configure cloud infrastructure using scripts. However, the scripting process does not automatically prevent practitioners from introducing misconfigurations, vulnerabilities, or privacy risks. As a result, ensuring security relies on practitioners' understanding and the adoption of explicit policies. To understand how practitioners deal with this problem, we perform an empirical study analyzing the adoption of scripted security best practices present in Terraform files, applied on AWS, Azure, and Google Cloud. We assess the adoption of these practices by analyzing a sample of 812 open-source GitHub projects. We scan each project's configuration files, looking for policy implementation through static analysis (Checkov and Tfsec). The category Access policy emerges as the most widely adopted in all providers, while Encryption at rest presents the most neglected policies. Regarding the cloud providers, we observe that AWS and Azure present similar behavior regarding attended and neglected policies. Finally, we provide guidelines for cloud practitioners to limit infrastructure vulnerability and discuss further aspects associated with policies that have yet to be extensively embraced within the industry.}, } @article {pmid40016446, year = {2025}, author = {Zhang, A and Tariq, A and Quddoos, A and Naz, I and Aslam, RW and Barboza, E and Ullah, S and Abdullah-Al-Wadud, M}, title = {Spatio-temporal analysis of urban expansion and land use dynamics using google earth engine and predictive models.}, journal = {Scientific reports}, volume = {15}, number = {1}, pages = {6993}, pmid = {40016446}, issn = {2045-2322}, abstract = {Urban expansion and changes in land use/land cover (LULC) have intensified in recent decades due to human activity, influencing ecological and developmental landscapes. This study investigated historical and projected LULC changes and urban growth patterns in the districts of Multan and Sargodha, Pakistan, using Landsat satellite imagery, cloud computing, and predictive modelling from 1990 to 2030. The analysis of satellite images was grouped into four time periods (1990-2000, 2000-2010, 2010-2020, and 2020-2030). The Google Earth Engine cloud-based platform facilitated the classification of Landsat 5 ETM (1990, 2000, and 2010) and Landsat 8 OLI (2020) images using the Random Forest model. A simulation model integrating Cellular Automata and an Artificial Neural Network Multilayer Perceptron in the MOLUSCE plugin of QGIS was employed to forecast urban growth to 2030. The resulting maps showed consistently high accuracy levels exceeding 92% for both districts across all time periods. 
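For readers who want to reproduce the kind of workflow this abstract describes, a minimal sketch of a Random Forest land-cover classification in the Google Earth Engine Python API follows; the asset ID, band list, class property, and region below are hypothetical placeholders, not the study's actual configuration:

    import ee

    ee.Initialize()  # assumes an authenticated Earth Engine session

    # Hypothetical study region and labeled training points.
    aoi = ee.Geometry.Rectangle([71.3, 30.0, 71.8, 30.4])
    labeled_points = ee.FeatureCollection('projects/example/assets/training_points')

    # Median Landsat 8 surface-reflectance composite for 2020 over the region.
    composite = (ee.ImageCollection('LANDSAT/LC08/C02/T1_L2')
                 .filterBounds(aoi)
                 .filterDate('2020-01-01', '2020-12-31')
                 .median()
                 .select(['SR_B2', 'SR_B3', 'SR_B4', 'SR_B5', 'SR_B6', 'SR_B7']))

    # Sample the composite at the labeled points and train a Random Forest.
    training = composite.sampleRegions(collection=labeled_points,
                                       properties=['class'], scale=30)
    classifier = ee.Classifier.smileRandomForest(numberOfTrees=100).train(
        features=training, classProperty='class',
        inputProperties=composite.bandNames())
    classified = composite.classify(classifier)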
The analysis revealed that Multan's built-up area increased from 240.56 km[2] (6.58%) in 1990 to 440.30 km[2] (12.04%) in 2020, while Sargodha experienced more dramatic growth from 730.91 km[2] (12.69%) to 1,029.07 km[2] (17.83%). Vegetation cover remained dominant but showed significant variations, particularly in peri-urban areas. By 2030, Multan's urban area is projected to stabilize at 433.22 km[2], primarily expanding in the southeastern direction. Sargodha is expected to reach 1,404.97 km[2], showing more balanced multi-directional growth toward the northeast and north. The study presents an effective analytical method integrating cloud processing, GIS, and change simulation modeling to evaluate urban growth spatiotemporal patterns and LULC changes. This approach successfully identified the main LULC transformations and trends in the study areas while highlighting potential urbanization zones where opportunities exist for developing planned and managed urban settlements.}, } @article {pmid39997132, year = {2025}, author = {Xiang, Z and Ying, F and Xue, X and Peng, X and Zhang, Y}, title = {Unmanned-Aerial-Vehicle Trajectory Planning for Reliable Edge Data Collection in Complex Environments.}, journal = {Biomimetics (Basel, Switzerland)}, volume = {10}, number = {2}, pages = {}, pmid = {39997132}, issn = {2313-7673}, support = {2022ZD0119103//National Science and Technology Major Project/ ; No. 62102350//National Natural Science Foundation of China/ ; }, abstract = {With the rapid advancement of edge-computing technology, more computing tasks are moving from traditional cloud platforms to edge nodes. This shift imposes challenges on efficiently handling the substantial data generated at the edge, especially in extreme scenarios, where conventional data collection methods face limitations. UAVs have emerged as a promising solution for overcoming these challenges by facilitating data collection and transmission in various environments. However, existing UAV trajectory optimization algorithms often overlook the critical factor of the battery capacity, leading to potential mission failures or safety risks. In this paper, we propose a trajectory planning approach, Hyperion, that incorporates charging considerations and employs a greedy strategy for decision-making to optimize the trajectory length and energy consumption. By ensuring the UAV's ability to return to the charging station after data collection, our method enhances task reliability and UAV adaptability in complex environments.}, } @article {pmid39997097, year = {2025}, author = {Huba, M and Bistak, P and Skrinarova, J and Vrancic, D}, title = {Performance Portrait Method: Robust Design of Predictive Integral Controller.}, journal = {Biomimetics (Basel, Switzerland)}, volume = {10}, number = {2}, pages = {}, pmid = {39997097}, issn = {2313-7673}, support = {1/0821/25//Scientific Grant Agency of the Ministry of Education, Research, Development and Youth of the Slovak Republic/ ; P2-0001//Slovenian Research and Innovation Agency/ ; L2-3166//Slovenian Research and Innovation Agency/ ; }, abstract = {The performance portrait method (PPM) can be characterized as a systematized, digitalized version of the trial-and-error method, probably the most popular and most frequently used method of engineering work. Its digitization required the expansion of performance measures used to evaluate the step responses of dynamic systems.
Based on process modeling, PPM also contributed to the classification of models describing linear and non-linear dynamic processes so that they approximate their dynamics using the smallest possible number of numerical parameters. PPM is distinguished from most bio-inspired artificial intelligence and optimization procedures used for automatic controller design by the possibility of repeatedly applying once-generated performance portraits (PPs). These represent information about the process obtained by evaluating the performance of setpoint and disturbance step responses for all relevant values of the determining loop parameters organized into a grid. It can be supported by the implementation of parallel calculations with optimized decomposition in the high-performance computing (HPC) cloud. The wide applicability of PPM ranges from verification of analytically calculated optimal settings achieved by various approaches to controller design, to the analysis as well as optimal and robust setting of controllers for processes where other known control design methods fail. One such situation is illustrated by an example of predictive integrating (PrI) controller design for processes with dominant time-delayed sensor dynamics, representing a counterpart of proportional-integrating (PI) controllers, the most frequently used solutions in practice. PrI controllers can be considered a generalization of disturbance-response feedback, the oldest known method for the design of dead-time compensators, due to Reswick. In applications with dominant dead-time and loop time constants located in the feedback (sensors), such as those met in magnetoencephalography (MEG), this makes it possible to significantly improve the control performance. PPM shows that, despite the absence of effective analytical control design methods for such situations, it is possible to obtain high-quality optimal solutions for processes that require working with uncertain models specified by interval parameters, while achieving invariance to changes in uncertain parameters.}, } @article {pmid39995919, year = {2025}, author = {He, J and Sui, D and Li, L and Lv, X}, title = {Fueling the development of elderly care services in China with digital technology: A provincial panel data analysis.}, journal = {Heliyon}, volume = {11}, number = {3}, pages = {e41490}, pmid = {39995919}, issn = {2405-8440}, abstract = {BACKGROUND: The global demographic shift towards an aging population presents significant challenges to elderly care services, which encompass the range of services designed to meet the health and social needs of older adults. Particularly in China, the aging society's diverse needs are often met with service inadequacies and inefficient resource allocation within the elderly care services framework.

OBJECTIVE: This study aims to investigate the transformative potential of digital technology, which includes innovations such as e-commerce, cloud computing, and artificial intelligence, on elderly care services in China. The objective is to assess the impact of digital technology on service quality, resource allocation, and operational efficiency within the elderly care services domain.

METHODS: Employing Stata software, the study conducts an analysis of panel data from 30 Chinese provinces over the period from 2014 to 2021, examining the integration and application of digital technology within elderly care services to identify trends and correlations.
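The abstract does not publish its Stata code, but a minimal two-way fixed-effects sketch of this kind of panel analysis, here in Python with statsmodels rather than Stata, might look as follows; the file name and variable names are hypothetical:

    import pandas as pd
    import statsmodels.formula.api as smf

    # Hypothetical long-format panel: one row per province-year, 2014-2021,
    # with columns province, year, care_quality, digital_tech.
    df = pd.read_csv('elderly_care_panel.csv')

    # Province and year fixed effects; standard errors clustered by province.
    model = smf.ols('care_quality ~ digital_tech + C(province) + C(year)', data=df)
    result = model.fit(cov_type='cluster', cov_kwds={'groups': df['province']})
    print(result.params['digital_tech'])  # estimated effect of digital technology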

RESULTS: The findings reveal that the integration of digital technology significantly enhances elderly care services, improving resource allocation and personalizing care, which in turn boosts the quality of life for the elderly. Specifically, a one-percentage-point increase in the development and adoption of digital technology within elderly care services is associated with a 21.5-percentage-point increase in care quality.

CONCLUSION: This research underscores the pivotal role of digital technology in revolutionizing elderly care services. The findings offer a strategic guide for policymakers and stakeholders to effectively harness digital technology, addressing the challenges posed by an aging society and enhancing the efficiency and accessibility of elderly care services in China. The application of digital technology in elderly care services is set to become a cornerstone in the future of elderly care, ensuring that the needs of the aging population are met with innovative and compassionate solutions.}, } @article {pmid39995485, year = {2024}, author = {Awasthi, C and Awasthi, SP and Mishra, PK}, title = {Secure and Reliable Fog-Enabled Architecture Using Blockchain With Functional Biased Elliptic Curve Cryptography Algorithm for Healthcare Services.}, journal = {Blockchain in healthcare today}, volume = {7}, number = {}, pages = {}, pmid = {39995485}, issn = {2573-8240}, abstract = {Fog computing (FC) is an emerging technology that extends the capability and efficiency of cloud computing networks by acting as a bridge between the cloud and the device. Fog devices can process an enormous volume of information locally, are transportable, and can be deployed on a variety of systems. Because of its real-time processing and event reactions, it is ideal for healthcare. With such a wide range of characteristics, new security and privacy concerns arise. Ensuring safe data transmission, arrival, and access, as well as the availability of medical devices, raises new security issues in healthcare. As a result, FC necessitates a unique approach to security and privacy metrics, as opposed to standard cloud computing methods. Hence, this article suggests an effective blockchain-based scheme for secure healthcare services in FC. Here, the fog nodes gather information from the medical sensor devices, and the data are validated using smart contracts in the blockchain network. We propose a functional biased elliptic curve cryptography algorithm to encrypt the data. The optimization is performed using the galactic bee colony optimization algorithm to enhance the procedure of encryption. The performance of the suggested methodology is assessed and contrasted with traditional techniques. It is proved that the combination of FC with blockchain has increased the security of data transmission in healthcare services.}, } @article {pmid39990574, year = {2025}, author = {Jin, J and Li, B and Wang, X and Yang, X and Li, Y and Wang, R and Ye, C and Shu, J and Fan, Z and Xue, F and Ge, T and Ritchie, MD and Pasaniuc, B and Wojcik, G and Zhao, B}, title = {PennPRS: a centralized cloud computing platform for efficient polygenic risk score training in precision medicine.}, journal = {medRxiv : the preprint server for health sciences}, volume = {}, number = {}, pages = {}, pmid = {39990574}, support = {R00 HG012223/HG/NHGRI NIH HHS/United States ; R01 MH136055/MH/NIMH NIH HHS/United States ; RF1 AG082938/AG/NIA NIH HHS/United States ; }, abstract = {Polygenic risk scores (PRS) are becoming increasingly vital for risk prediction and stratification in precision medicine. However, PRS model training presents significant challenges for broader adoption of PRS, including limited access to computational resources, difficulties in implementing advanced PRS methods, and availability and privacy concerns over individual-level genetic data. Cloud computing provides a promising solution with centralized computing and data resources.
Here we introduce PennPRS (https://pennprs.org), a scalable cloud computing platform for online PRS model training in precision medicine. We developed novel pseudo-training algorithms for multiple PRS methods and ensemble approaches, enabling model training without requiring individual-level data. These methods were rigorously validated through extensive simulations and large-scale real data analyses involving over 6,000 phenotypes across various data sources. PennPRS supports online single- and multi-ancestry PRS training with seven methods, allowing users to upload their own data or query from more than 27,000 datasets in the GWAS Catalog, submit jobs, and download trained PRS models. Additionally, we applied our pseudo-training pipeline to train PRS models for over 8,000 phenotypes and made their PRS weights publicly accessible. In summary, PennPRS provides a novel cloud computing solution to improve the accessibility of PRS applications and reduce disparities in computational resources for the global PRS research community.}, } @article {pmid39980331, year = {2025}, author = {Wolski, M and Woloszynski, T and Stachowiak, G and Podsiadlo, P}, title = {Bone Data Lake: A storage platform for bone texture analysis.}, journal = {Proceedings of the Institution of Mechanical Engineers. Part H, Journal of engineering in medicine}, volume = {239}, number = {2}, pages = {190-201}, doi = {10.1177/09544119251318434}, pmid = {39980331}, issn = {2041-3033}, mesh = {Humans ; *Image Processing, Computer-Assisted/methods ; Information Storage and Retrieval/methods ; Bone and Bones/diagnostic imaging ; }, abstract = {Trabecular bone (TB) texture regions selected on hand and knee X-ray images can be used to detect and predict osteoarthritis (OA). However, the analysis has been impeded by increasing data volume and diversification of data formats. To address this problem, a novel storage platform, called Bone Data Lake (BDL), is proposed for the collection and retention of large numbers of images, TB texture regions and parameters, regardless of their structure, size and source. BDL consists of three components: a raw data storage, a processed data storage, and a data reference system. The performance of the BDL was evaluated using 20,000 knee and hand X-ray images of various formats (DICOM, PNG, JPEG, BMP, and compressed TIFF) and sizes (from 0.3 to 66.7 MB). The images were uploaded into BDL and automatically converted into a standardized 8-bit grayscale uncompressed TIFF format. TB regions of interest were then selected on the standardized images, and a data catalog containing metadata information about the regions was constructed. Next, TB texture parameters were calculated for the regions using Variance Orientation Transform (VOT) and Augmented VOT (AVOT) methods and stored in XLSX files. The files were uploaded into BDL, and then transformed into CSV files and cataloged. Results showed that the BDL efficiently transforms images and catalogs bone regions and texture parameters.
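The standardization step described here (arbitrary raster inputs converted to 8-bit grayscale uncompressed TIFF) can be sketched with Pillow; this is an illustrative assumption of how such a converter could look, not the platform's actual code, and DICOM inputs would additionally need a reader such as pydicom:

    from pathlib import Path
    from PIL import Image

    def standardize(src: Path, dst_dir: Path) -> Path:
        """Convert a PNG/JPEG/BMP/TIFF image to 8-bit grayscale uncompressed TIFF."""
        img = Image.open(src).convert('L')              # 'L' = 8-bit grayscale
        out = dst_dir / (src.stem + '.tiff')
        img.save(out, format='TIFF', compression=None)  # no compression
        return out

    out = standardize(Path('knee_0001.png'), Path('standardized'))  # hypothetical file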
BDL can serve as the foundation of a reliable, secure and collaborative system for OA detection and prediction based on radiographs and TB texture.}, } @article {pmid39979488, year = {2025}, author = {Shahid, U and Kanwal, S and Bano, M and Inam, S and Abdalla, MEM and Shaikh, ZA}, title = {Blockchain driven medical image encryption employing chaotic tent map in cloud computing.}, journal = {Scientific reports}, volume = {15}, number = {1}, pages = {6236}, pmid = {39979488}, issn = {2045-2322}, abstract = {Data security during transmission over public networks has become a key concern in an era of rapid digitization. Image data is especially vulnerable since it can be stored or transferred using public cloud services, making it open to illegal access, breaches, and eavesdropping. This work suggests a novel way to integrate blockchain technology with a Chaotic Tent map encryption scheme in order to overcome these issues. The outcome is a Blockchain driven Chaotic Tent Map Encryption Scheme (BCTMES) for secure picture transactions. The idea behind this strategy is to ensure an extra degree of security by fusing the distributed and immutable properties of blockchain technology with the intricate encryption offered by chaotic maps. To ensure that the image is transformed into a cipher form that is resistant to several types of attacks, the proposed BCTMES first encrypts it using the Chaotic Tent map encryption technique. The accompanying signed document is safely kept on the blockchain, and this encrypted image is subsequently uploaded to the cloud. The integrity and authenticity of the image are confirmed upon retrieval by utilizing blockchain's consensus mechanism, adding another layer of security against manipulation. Comprehensive performance evaluations show that BCTMES provides notable enhancements in important security parameters, such as entropy, correlation coefficient, key sensitivity, peak signal-to-noise ratio (PSNR), unified average changing intensity (UACI), and number of pixels change rate (NPCR). In addition to providing good defense against brute-force attacks, the high key size of [Formula: see text] further strengthens the system's resilience. To sum up, the BCTMES effectively addresses a number of prevalent risks to picture security and offers a complete solution that may be implemented in cloud-based settings where data integrity and privacy are crucial. This work suggests a promising path for further investigation and practical uses in secure image transmission.}, } @article {pmid39975539, year = {2025}, author = {Quevedo, D and Do, K and Delic, G and Rodríguez-Borbón, J and Wong, BM and Ivey, CE}, title = {GPU Implementation of a Gas-Phase Chemistry Solver in the CMAQ Chemical Transport Model.}, journal = {ACS ES&T air}, volume = {2}, number = {2}, pages = {226-235}, pmid = {39975539}, issn = {2837-1402}, abstract = {The Community Multiscale Air Quality (CMAQ) model simulates atmospheric phenomena, including advection, diffusion, gas-phase chemistry, aerosol physics and chemistry, and cloud processes. Gas-phase chemistry is often a major computational bottleneck due to its representation as large systems of coupled nonlinear stiff differential equations. We leverage the parallel computational performance of graphics processing unit (GPU) hardware to accelerate the numerical integration of these systems in CMAQ's CHEM module. 
Our implementation, dubbed CMAQ-CUDA, in reference to its use in the Compute Unified Device Architecture (CUDA) general purpose GPU (GPGPU) computing solution, migrates CMAQ's Rosenbrock solver from Fortran to CUDA Fortran. CMAQ-CUDA accelerates the Rosenbrock solver such that simulations using the chemical mechanisms RACM2, CB6R5, and SAPRC07 require only 51%, 50%, or 35% as much time, respectively, as CMAQv5.4 to complete a chemistry time step. Our results demonstrate that CMAQ is amenable to GPU acceleration and highlight a novel Rosenbrock solver implementation for reducing the computational burden imposed by the CHEM module.}, } @article {pmid39973850, year = {2025}, author = {Wu, S and Bin, G and Shi, W and Lin, L and Xu, Y and Zhao, D and Morgan, SP and Sun, S}, title = {Empowering diabetic foot ulcer prevention: A novel cloud-based plantar pressure monitoring system for enhanced self-care.}, journal = {Technology and health care : official journal of the European Society for Engineering and Medicine}, volume = {33}, number = {2}, pages = {701-718}, doi = {10.1177/09287329241290943}, pmid = {39973850}, issn = {1878-7401}, mesh = {Humans ; *Diabetic Foot/prevention & control/therapy ; *Self Care/methods ; *Pressure ; Cloud Computing ; Wearable Electronic Devices ; Mobile Applications ; Foot/physiopathology ; Male ; }, abstract = {Background: This study was prompted by the crucial impact of abnormal plantar pressure on diabetic foot ulcer development and the notable lack of its monitoring in daily life. Our research introduces a cloud-based, user-friendly plantar pressure monitoring system designed for seamless integration into daily routines. Objective: This innovative system aims to enable early ulcer prediction and proactive prevention, thereby substantially improving diabetic foot care through enhanced self-care and timely intervention. Methods: A novel, user-centric plantar pressure monitoring system was developed, integrating a wearable device, mobile application, and cloud computing for instantaneous diabetic foot care. This configuration facilitates comprehensive monitoring at 64 underfoot points. It encourages user engagement in health management. The system wirelessly transmits data to the cloud, where insights are processed and made available on the app, fostering proactive self-care through immediate feedback. Tailored for daily use, our system streamlines home monitoring, enhancing early ulcer detection and preventative measures. Results: A feasibility study validated our system's accuracy, demonstrating a relative error of approximately 4% compared to a commercial pressure sensing walkway. This precision affirms the system's efficacy for home-based monitoring and its potential in diabetic foot ulcer prevention, positioning it as a viable instrument for self-managed care. Conclusions: The system dynamically captures and analyzes plantar pressure distribution and gait cycle details, highlighting its utility in early diabetic foot ulcer detection and management.
Offering real-time, actionable data, it stands as a critical tool for individuals to actively participate in their foot health care, epitomizing the essence of self-managed healthcare practices.}, } @article {pmid39968172, year = {2025}, author = {Balamurugan, M and Narayanan, K and Raghu, N and Arjun Kumar, GB and Trupti, VN}, title = {Role of artificial intelligence in smart grid - a mini review.}, journal = {Frontiers in artificial intelligence}, volume = {8}, number = {}, pages = {1551661}, pmid = {39968172}, issn = {2624-8212}, abstract = {A smart grid is a structure that regulates, operates, and utilizes energy sources that are incorporated into the grid using smart communication and computerized techniques. The running and maintenance of smart grids now depend quite extensively on artificial intelligence methods. Artificial intelligence is enabling more dependable, efficient, and sustainable energy systems, from improving load forecasting accuracy to optimizing power distribution and guaranteeing issue identification. An intelligent smart grid will be created by substituting artificial intelligence for manual tasks and achieving high efficiency, dependability, and affordability across the energy supply chain, from production to consumption. Collection of a large diversity of data is vital to make effective decisions. Artificial intelligence applications operate by processing abundant data samples with advanced computing and strong communication collaboration. The development of appropriate infrastructure resources, including big data, cloud computing, and other collaboration platforms, must be enhanced for this type of operation. In this paper, an attempt has been made to summarize the artificial intelligence techniques used in various aspects of smart grid systems.}, } @article {pmid39966459, year = {2025}, author = {Zan, T and Jia, X and Guo, X and Wang, M and Gao, X and Gao, P}, title = {Research on variable-length control chart pattern recognition based on sliding window method and SECNN-BiLSTM.}, journal = {Scientific reports}, volume = {15}, number = {1}, pages = {5921}, pmid = {39966459}, issn = {2045-2322}, support = {51975020//National Natural Science Foundation of China/ ; 3202005//Beijing Natural Science Foundation/ ; }, abstract = {Control charts, as essential tools in Statistical Process Control (SPC), are frequently used to analyze whether production processes are under control. Most existing control chart recognition methods target fixed-length data, failing to meet the needs of recognizing variable-length control charts in production. This paper proposes a variable-length control chart recognition method based on the Sliding Window Method and SE-attention CNN and Bi-LSTM (SECNN-BiLSTM). A cloud-edge integrated recognition system was developed using wireless digital calipers, embedded devices, and cloud computing. Control chart data of different lengths are transformed from one-dimensional sequences into two-dimensional matrices using a sliding window approach and then fed into a deep learning network combining SE-attention CNN and Bi-LSTM. This network, inspired by residual structures, extracts multiple features to build a control chart recognition model.
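One plausible reading of the sliding-window step just described, turning a variable-length 1D sequence into a fixed-size 2D matrix for the network input, is sketched below; the window length, stride, and matrix shape are assumptions, not the paper's published settings:

    import numpy as np
    from numpy.lib.stride_tricks import sliding_window_view

    def to_matrix(series: np.ndarray, width: int = 32, height: int = 32) -> np.ndarray:
        """Stack `height` consecutive length-`width` windows into a 2D matrix."""
        windows = sliding_window_view(series, width)  # (len(series) - width + 1, width)
        return windows[:height]                       # -> at most (height, width)

    sample = np.random.randn(120)   # a variable-length control chart sequence
    matrix = to_matrix(sample)      # 2D input for an SECNN-BiLSTM-style network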
Simulations, the cloud-edge recognition system, and engineering applications demonstrate that this method efficiently and accurately recognizes variable-length control charts, establishing a foundation for more efficient pattern recognition.}, } @article {pmid39965494, year = {2025}, author = {Pricope, NG and Dalton, EG}, title = {Mapping coastal resilience: Precision insights for green infrastructure suitability.}, journal = {Journal of environmental management}, volume = {376}, number = {}, pages = {124511}, doi = {10.1016/j.jenvman.2025.124511}, pmid = {39965494}, issn = {1095-8630}, mesh = {North Carolina ; *Ecosystem ; Climate Change ; Floods ; City Planning ; Conservation of Natural Resources/methods ; }, abstract = {Addressing the need for effective flood risk mitigation strategies and enhanced urban resilience to climate change, we introduce a cloud-computed Green Infrastructure Suitability Index (GISI) methodology. This approach combines remote sensing and geospatial modeling to create a cloud-computed blend that synthesizes land cover classifications, biophysical variables, and flood exposure data to map suitability for green infrastructure (GI) implementation at both street and landscape levels. The GISI methodology provides a flexible and robust tool for urban planning, capable of accommodating diverse data inputs and adjustments, making it suitable for various geographic contexts. Applied within the Wilmington Urban Area Metropolitan Planning Organization (WMPO) in North Carolina, USA, our findings show that residential parcels, constituting approximately 91% of the total identified suitable areas, are optimally positioned for GI integration. This underscores the potential for embedding GI within developed residential urban landscapes to bolster ecosystem and community resilience. Our analysis indicates that 7.19% of the WMPO area is highly suitable for street-level GI applications, while 1.88% is ideal for landscape GI interventions, offering opportunities to enhance stormwater management and biodiversity at larger and more connected spatial scales. By identifying specific parcels with high suitability for GI, this research provides a comprehensive and transferable, data-driven foundation for local and regional planning efforts. The scalability and adaptability of the proposed modeling approach make it a powerful tool for informing sustainable urban development practices. Future work will focus on more spatially-resolved models of these areas and the exploration of GI's multifaceted benefits at the local level, aiming to guide the deployment of GI projects that align with broader environmental and social objectives.}, } @article {pmid39964566, year = {2025}, author = {Bathelt, F and Lorenz, S and Weidner, J and Sedlmayr, M and Reinecke, I}, title = {Application of Modular Architectures in the Medical Domain - a Scoping Review.}, journal = {Journal of medical systems}, volume = {49}, number = {1}, pages = {27}, pmid = {39964566}, issn = {1573-689X}, support = {01KX2121,01ZZ2101A//Bundesministerium für Bildung und Forschung/ ; }, mesh = {Humans ; *Computer Security ; Systems Integration ; Information Systems/organization & administration ; Electronic Health Records/organization & administration ; }, abstract = {The healthcare sector is notable for its reliance on discrete, self-contained information systems, which are often characterised by the presence of disparate data silos. 
The growing demands for documentation, quality assurance, and secondary use of medical data for research purposes have underscored the necessity for solutions that are more flexible, straightforward to maintain, and interoperable. In this context, modular systems have the potential to act as a catalyst for change, offering the capacity to encapsulate and combine functionalities in an adaptable manner. The objective of this scoping review is to determine the extent to which modular systems are employed in the medical field. The review provides a detailed overview of the effectiveness of service-oriented or microservice architectures, the challenges that should be addressed during implementation, and the lessons that can be learned from countries with productive use of such modular architectures. The review shows a rise in the use of microservices, indicating a shift towards encapsulated autonomous functions. The implementation should use HL7 FHIR as the communication standard, deploy RESTful interfaces and standard protocols for technical data exchange, and apply the HIPAA Security Rule for security purposes. User involvement is essential, as is integrating services into existing workflows. Modular architectures can facilitate flexibility and scalability. However, there are well-documented performance issues associated with microservice architectures, namely a high communication demand. One potential solution to this problem may be to integrate modular architectures into a cloud computing environment, which would require further investigation.}, } @article {pmid39963423, year = {2024}, author = {Kelliher, JM and Xu, Y and Flynn, MC and Babinski, M and Canon, S and Cavanna, E and Clum, A and Corilo, YE and Fujimoto, G and Giberson, C and Johnson, LYD and Li, KJ and Li, PE and Li, V and Lo, CC and Lynch, W and Piehowski, P and Prime, K and Purvine, S and Rodriguez, F and Roux, S and Shakya, M and Smith, M and Sarrafan, S and Cholia, S and McCue, LA and Mungall, C and Hu, B and Eloe-Fadrosh, EA and Chain, PSG}, title = {Standardized and accessible multi-omics bioinformatics workflows through the NMDC EDGE resource.}, journal = {Computational and structural biotechnology journal}, volume = {23}, number = {}, pages = {3575-3583}, pmid = {39963423}, issn = {2001-0370}, abstract = {Accessible and easy-to-use standardized bioinformatics workflows are necessary to advance microbiome research from observational studies to large-scale, data-driven approaches. Standardized multi-omics data enables comparative studies, data reuse, and applications of machine learning to model biological processes. To advance broad accessibility of standardized multi-omics bioinformatics workflows, the National Microbiome Data Collaborative (NMDC) has developed the Empowering the Development of Genomics Expertise (NMDC EDGE) resource, a user-friendly, open-source web application (https://nmdc-edge.org). Here, we describe the design and main functionality of the NMDC EDGE resource for processing metagenome, metatranscriptome, natural organic matter, and metaproteome data. The architecture relies on three main layers (web application, orchestration, and execution) to ensure flexibility and expansion to future workflows. The orchestration and execution layers leverage best practices in software containers and accommodate high-performance computing and cloud computing services. Further, we have adopted a robust user research process to collect feedback for continuous improvement of the resource.
NMDC EDGE provides an accessible interface for researchers to process multi-omics microbiome data using production-quality workflows to facilitate improved data standardization and interoperability.}, } @article {pmid39960376, year = {2025}, author = {Dinpajooh, M and Hightower, GL and Overstreet, RE and Metz, LA and Henson, NJ and Govind, N and Ritzmann, AM and Uhnak, NE}, title = {On the stability constants of metal-nitrate complexes in aqueous solutions.}, journal = {Physical chemistry chemical physics : PCCP}, volume = {}, number = {}, pages = {}, doi = {10.1039/d4cp04295f}, pmid = {39960376}, issn = {1463-9084}, abstract = {Stability constants of simple reactions involving addition of the NO3[-] ion to hydrated metal complexes, [M(H2O)x][n+], are calculated with a computational workflow developed using cloud computing resources. The computational workflow performs conformational searches for metal complexes at both low and high levels of theory in conjunction with a continuum solvation model (CSM). The low-level theory is mainly used for the initial conformational searches, which are complemented with high-level density functional theory conformational searches in the CSM framework to determine the coordination chemistry relevant for stability constant calculations. In this regard, the lowest energy conformations are found to obtain the reaction free energies for the addition of one NO3[-] to [M(H2O)x][n+] complexes, where M represents Fe(II), Fe(III), Sr(II), Ce(III), Ce(IV), and U(VI), respectively. Structural analysis of hundreds of optimized geometries at high-level theory reveals that NO3[-] coordinates with Fe(II) and Fe(III) in either a monodentate or bidentate manner. Interestingly, the lowest-energy conformations of Fe(II) metal-nitrate complexes exhibit monodentate or bidentate coordination with a coordination number of 6, while the bidentate seven-coordinated Fe(II) metal-nitrate complexes are approximately 2 kcal mol[-1] higher in energy. Notably, for Fe(III) metal-nitrate complexes, the bidentate seven-coordinated configuration is more stable than the six-coordinated Fe(II) complexes (monodentate or bidentate) by a few thermal energy units. In contrast, Sr(II), Ce(III), Ce(IV), and U(VI) metal ions predominantly coordinate with NO3[-] in a bidentate manner, exhibiting typical coordination numbers of 7, 9, 9, and 5, respectively. Stability constants are accordingly calculated using linear free energy approaches to account for the systematic errors, and good agreement is obtained between the calculated stability constants and the available experimental data.}, } @article {pmid39959477, year = {2025}, author = {Thilakarathne, NN and Abu Bakar, MS and Abas, PE and Yassin, H}, title = {Internet of things enabled smart agriculture: Current status, latest advancements, challenges and countermeasures.}, journal = {Heliyon}, volume = {11}, number = {3}, pages = {e42136}, pmid = {39959477}, issn = {2405-8440}, abstract = {It is no wonder that agriculture plays a vital role in the development of some countries, as their economies rely on agricultural activities and the production of food for human survival. Owing to the ever-increasing world population, estimated at 7.9 billion in 2022, feeding this number of people has become a concern, as the current rate of agricultural food production is constrained for various reasons.
The advent of Internet of Things (IoT)-based technologies in the 21st century has led to the reshaping of every industry, including agriculture, and has paved the way for smart agriculture, with the technology used to automate and control most aspects of traditional agriculture. Smart agriculture, interchangeably known as smart farming, utilizes IoT and related enabling technologies such as cloud computing, artificial intelligence, and big data in agriculture and offers the potential to enhance agricultural operations by automating processes and making intelligent decisions, resulting in increased efficiency and a better yield with minimum waste. Consequently, most governments are spending more money and offering incentives to switch from traditional to smart agriculture. Nonetheless, the COVID-19 global pandemic served as a catalyst for change in the agriculture industry, driving a shift toward greater reliance on technology over traditional labor for agricultural tasks. In this regard, this research aims to synthesize the current knowledge of smart agriculture, highlighting its current status, main components, latest application areas, advanced agricultural practices, hardware and software used, success stories, potential challenges, and countermeasures to them, and future trends, for the growth of the industry as well as a reference to future research.}, } @article {pmid39949325, year = {2025}, author = {Wyman, A and Zhang, Z}, title = {A Tutorial on the Use of Artificial Intelligence Tools for Facial Emotion Recognition in R.}, journal = {Multivariate behavioral research}, volume = {}, number = {}, pages = {1-15}, doi = {10.1080/00273171.2025.2455497}, pmid = {39949325}, issn = {1532-7906}, abstract = {Automated detection of facial emotions has been an interesting topic for multiple decades in social and behavioral research but has become feasible only recently. In this tutorial, we review three popular artificial intelligence based emotion detection programs that are accessible to R programmers: Google Cloud Vision, Amazon Rekognition, and Py-Feat. We present their advantages and disadvantages and provide sample code so that researchers can immediately begin designing, collecting, and analyzing emotion data. Furthermore, we provide an introductory-level explanation of the machine learning, deep learning, and computer vision algorithms that underlie most emotion detection programs in order to improve literacy in explainable artificial intelligence in the social and behavioral science literature.}, } @article {pmid39946685, year = {2025}, author = {Guturu, H and Nichols, A and Cantrell, LS and Just, S and Kis, J and Platt, T and Mohtashemi, I and Wang, J and Batzoglou, S}, title = {Cloud-Enabled Scalable Analysis of Large Proteomics Cohorts.}, journal = {Journal of proteome research}, volume = {24}, number = {3}, pages = {1462-1469}, doi = {10.1021/acs.jproteome.4c00771}, pmid = {39946685}, issn = {1535-3907}, mesh = {*Proteomics/methods/statistics & numerical data ; *Cloud Computing ; *Algorithms ; *Software ; Humans ; Search Engine ; Cohort Studies ; Mass Spectrometry/methods ; }, abstract = {Rapid advances in depth and throughput of untargeted mass-spectrometry-based proteomic technologies enable large-scale cohort proteomic and proteogenomic analyses. As such, the data infrastructure and search engines required to process data must also scale.
This challenge is amplified in search engines that rely on library-free match between runs (MBR) search, which enables enhanced depth-per-sample and data completeness. However, to date, no MBR-based search has been able to scale to process cohorts of thousands or more individuals. Here, we present a strategy to deploy search engines in a distributed cloud environment without source code modification, thereby enhancing resource scalability and throughput. Additionally, we present an algorithm, Scalable MBR, that replicates the MBR procedure of the popular DIA-NN software for scalability to thousands of samples. We demonstrate that Scalable MBR can search thousands of MS raw files in a few hours compared to the days required for the original DIA-NN MBR procedure and demonstrate that the results are almost indistinguishable from those of DIA-NN native MBR. We additionally show that empirical spectra generated by Scalable MBR better approximate DIA-NN native MBR compared to semiempirical alternatives such as ID-RT-IM MBR, preserving user choice to use empirical libraries in large cohort analysis. The method has been tested to scale to over 15,000 injections and is available for use in the Proteograph Analysis Suite.}, } @article {pmid39943558, year = {2025}, author = {Li, H and Chung, H}, title = {Prediction of Member Forces of Steel Tubes on the Basis of a Sensor System with the Use of AI.}, journal = {Sensors (Basel, Switzerland)}, volume = {25}, number = {3}, pages = {}, pmid = {39943558}, issn = {1424-8220}, abstract = {The rapid development of AI (artificial intelligence), sensor technology, high-speed Internet, and cloud computing has demonstrated the potential of data-driven approaches in structural health monitoring (SHM) within the field of structural engineering. Algorithms based on machine learning (ML) models are capable of discerning intricate structural behavioral patterns from real-time data gathered by sensors, thereby offering solutions to engineering quandaries in structural mechanics and SHM. This study presents an innovative approach based on AI and a fiber-reinforced polymer (FRP) double-helix sensor system for the prediction of forces acting on steel tube members in offshore wind turbine support systems; this enables structural health monitoring of the support system. The steel tube as the transitional member and the FRP double-helix sensor system were initially modeled in three dimensions using ABAQUS finite element software. Subsequently, the data obtained from the finite element analysis (FEA) were input into a fully connected neural network (FCNN) model, with the objective of establishing a nonlinear mapping relationship between the inputs (strain) and the outputs (reaction force). In the FCNN model, the impact of the number of input variables on the model's predictive performance is examined through cross-comparison of different combinations and positions of the six sets of input variables. Based on an evaluation of engineering costs and the number of strain sensors, a series of potential combinations of variables are identified for further optimization. Furthermore, the potential variable combinations were optimized using a convolutional neural network (CNN) model, resulting in optimal input variable combinations that achieved the accuracy level of more input variable combinations with fewer sensors. This not only improves the prediction performance of the model but also effectively controls the engineering cost.
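A minimal PyTorch sketch of a fully connected strain-to-force mapping in the spirit of the FCNN just described follows; the six strain inputs, layer sizes, and training hyperparameters are illustrative assumptions rather than the paper's architecture:

    import torch
    import torch.nn as nn

    class StrainToForce(nn.Module):
        """Small fully connected network: strain readings in, reaction force out."""
        def __init__(self, n_strain_inputs: int = 6):
            super().__init__()
            self.net = nn.Sequential(
                nn.Linear(n_strain_inputs, 64), nn.ReLU(),
                nn.Linear(64, 64), nn.ReLU(),
                nn.Linear(64, 1),  # predicted reaction force
            )

        def forward(self, x: torch.Tensor) -> torch.Tensor:
            return self.net(x)

    model = StrainToForce()
    loss_fn = nn.MSELoss()
    opt = torch.optim.Adam(model.parameters(), lr=1e-3)

    strain = torch.randn(32, 6)  # a batch of simulated strain readings (stand-in for FEA data)
    force = torch.randn(32, 1)   # matching FEA reaction forces
    opt.zero_grad()
    loss = loss_fn(model(strain), force)
    loss.backward()
    opt.step()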
The model performance was evaluated using several metrics, including R[2], MSE, MAE, and SMAPE. The results demonstrated that the CNN model exhibited notable advantages in terms of fitting accuracy and computational efficiency when confronted with a limited data set. To provide further support for practical applications, an interactive graphical user interface (GUI)-based sensor-coupled mechanical prediction system for steel tubes was developed. This system enables engineers to predict the member forces of steel tubes in real time, thereby enhancing the efficiency and accuracy of SHM for offshore wind turbine support systems.}, } @article {pmid39943553, year = {2025}, author = {Alboqmi, R and Gamble, RF}, title = {Enhancing Microservice Security Through Vulnerability-Driven Trust in the Service Mesh Architecture.}, journal = {Sensors (Basel, Switzerland)}, volume = {25}, number = {3}, pages = {}, pmid = {39943553}, issn = {1424-8220}, abstract = {Cloud-native computing enhances the deployment of microservice architecture (MSA) applications by improving scalability and resilience, particularly in Beyond 5G (B5G) environments such as Sixth-Generation (6G) networks. This is achieved through the ability to replace traditional hardware dependencies with software-defined solutions. While service meshes enable secure communication for deployed MSAs, they struggle to identify vulnerabilities inherent to microservices. The reliance on third-party libraries and modules, essential for MSAs, introduces significant supply chain security risks. Implementing a zero-trust approach for MSAs requires robust mechanisms to continuously verify and monitor the software supply chain of deployed microservices. However, existing service mesh solutions lack runtime trust evaluation capabilities for continuous vulnerability assessment of third-party libraries and modules. This paper introduces a mechanism for continuous runtime trust evaluation of microservices, integrating vulnerability assessments within a service mesh to enhance the deployed MSA application. The proposed approach dynamically assigns trust scores to deployed microservices, rewarding secure practices such as timely vulnerability patching. It also enables the sharing of assessment results, enhancing mitigation strategies across the deployed MSA application. The mechanism is evaluated using the Train Ticket MSA, a complex open-source benchmark MSA application deployed with Docker containers, orchestrated using Kubernetes, and integrated with the Istio service mesh. 
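As a toy illustration of the vulnerability-driven trust idea just described, a severity-weighted score with a reward for timely patching could be computed as below; the weights, threshold, and bonus are invented for illustration and are not the paper's scoring model:

    from dataclasses import dataclass

    # Invented severity weights for open, unpatched vulnerabilities.
    SEVERITY_WEIGHT = {'low': 0.05, 'medium': 0.15, 'high': 0.30, 'critical': 0.50}

    @dataclass
    class ServiceScan:
        open_vulns: list        # severities of unpatched CVEs, e.g. ['high', 'low']
        days_to_patch: float    # average historical time-to-patch, in days

    def trust_score(scan: ServiceScan) -> float:
        score = 1.0
        for sev in scan.open_vulns:
            score -= SEVERITY_WEIGHT.get(sev, 0.1)  # penalize each open vulnerability
        if scan.days_to_patch < 7:                  # reward timely patching
            score += 0.05
        return max(0.0, min(1.0, score))            # clamp to [0, 1]

    print(trust_score(ServiceScan(['high', 'low'], days_to_patch=3.0)))  # -> 0.70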
Results demonstrate that the enhanced service mesh effectively supports dynamic trust evaluation based on the vulnerability posture of deployed microservices, significantly improving MSA security and paving the way for future self-adaptive solutions.}, } @article {pmid39943369, year = {2025}, author = {Abushark, YB and Hassan, S and Khan, AI}, title = {Optimized Adaboost Support Vector Machine-Based Encryption for Securing IoT-Cloud Healthcare Data.}, journal = {Sensors (Basel, Switzerland)}, volume = {25}, number = {3}, pages = {}, pmid = {39943369}, issn = {1424-8220}, support = {GPIP: 1839-611-2024//King Abdulaziz University/ ; }, mesh = {*Computer Security ; *Support Vector Machine ; *Internet of Things ; *Cloud Computing ; *Algorithms ; *Confidentiality ; Humans ; Delivery of Health Care ; }, abstract = {The Internet of Things (IoT) connects various medical devices that enable remote monitoring, which can improve patient outcomes and help healthcare providers deliver precise diagnoses and better service to patients. However, IoT-based healthcare management systems face significant challenges in data security, such as maintaining a triad of confidentiality, integrity, and availability (CIA) and securing data transmission. This paper proposes a novel AdaBoost support vector machine (ASVM) based on the grey wolf optimization and international data encryption algorithm (ASVM-based GWO-IDEA) to secure medical data in an IoT-enabled healthcare system. The primary objective of this work was to prevent possible cyberattacks, unauthorized access, and tampering with the security of such healthcare systems. The proposed scheme encodes the healthcare data before transmitting them, protecting them from unauthorized access and other network vulnerabilities. The scheme was implemented in Python, and its efficiency was evaluated using a Kaggle-based public healthcare dataset. The performance of the model/scheme was evaluated with existing strategies in the context of effective security parameters, such as the confidentiality rate and throughput. When using the suggested methodology, the data transmission process was improved and achieved a high throughput of 97.86%, an improved resource utilization degree of 98.45%, and a high efficiency of 93.45% during data transmission.}, } @article {pmid39943356, year = {2025}, author = {Mahedero Biot, F and Fornes-Leal, A and Vaño, R and Reinosa Simón, R and Lacalle, I and Guardiola, C and Palau, CE}, title = {A Novel Orchestrator Architecture for Deploying Virtualized Services in Next-Generation IoT Computing Ecosystems.}, journal = {Sensors (Basel, Switzerland)}, volume = {25}, number = {3}, pages = {}, pmid = {39943356}, issn = {1424-8220}, support = {957258//European Commission/ ; 101069732//European Commission/ ; }, abstract = {The Next-Generation IoT integrates diverse technological enablers, allowing the creation of advanced systems with increasingly complex requirements and maximizing the use of available IoT-edge-cloud resources. This paper introduces an orchestrator architecture for dynamic IoT scenarios, inspired by ETSI NFV MANO and Cloud Native principles, where distributed computing nodes often have unfixed and changing networking configurations. Unlike traditional approaches, this architecture also focuses on managing services across massively distributed mobile nodes, as demonstrated in the automotive use case presented. 
Apart from working as a MANO framework, the proposed solution efficiently handles service lifecycle management in large fleets of vehicles without relying on public or static IP addresses for connectivity. Its modular, microservices-based approach ensures adaptability to emerging trends like Edge Native, WebAssembly and RISC-V, positioning it as a forward-looking innovation for IoT ecosystems.}, } @article {pmid39943326, year = {2025}, author = {Khan, FU and Shah, IA and Jan, S and Ahmad, S and Whangbo, T}, title = {Machine Learning-Based Resource Management in Fog Computing: A Systematic Literature Review.}, journal = {Sensors (Basel, Switzerland)}, volume = {25}, number = {3}, pages = {}, pmid = {39943326}, issn = {1424-8220}, mesh = {*Machine Learning ; Humans ; Deep Learning ; }, abstract = {This systematic literature review analyzes machine learning (ML)-based techniques for resource management in fog computing. Utilizing the Preferred Reporting Items for Systematic Reviews and Meta-Analyses (PRISMA) protocol, this paper focuses on ML and deep learning (DL) solutions. Resource management in the fog computing domain was thoroughly analyzed by identifying the key factors and constraints. A total of 68 research papers, including extended versions, were finally selected and included in this study. The findings highlight a strong preference for DL in addressing resource management challenges within a fog computing paradigm, i.e., 66% of the reviewed articles leveraged DL techniques, while 34% utilized ML. Key factors such as latency, energy consumption, task scheduling, and QoS are interconnected and critical for resource management optimization. The analysis reveals that latency, energy consumption, and QoS are the prime factors addressed in the literature on ML-based fog computing resource management. Latency is the most frequently addressed parameter, investigated in 77% of the articles, followed by energy consumption and task scheduling at 44% and 33%, respectively. Furthermore, according to our evaluation, an extensive range of challenges, i.e., computational resource and latency, scalability and management, data availability and quality, and model complexity and interpretability, are addressed by employing 73, 53, 45, and 46 ML/DL techniques, respectively.}, } @article {pmid39943311, year = {2025}, author = {Ogwara, NO and Petrova, K and Yang, MLB and MacDonell, SG}, title = {MINDPRES: A Hybrid Prototype System for Comprehensive Data Protection in the User Layer of the Mobile Cloud.}, journal = {Sensors (Basel, Switzerland)}, volume = {25}, number = {3}, pages = {}, pmid = {39943311}, issn = {1424-8220}, abstract = {Mobile cloud computing (MCC) is a technological paradigm for providing services to mobile device (MD) users. A compromised MD may cause harm to both its user and other MCC customers. This study explores the use of machine learning (ML) models and stochastic methods for the protection of Android MDs connected to the mobile cloud. To test the validity and feasibility of the proposed models and methods, the study adopted a proof-of-concept approach and developed a prototype system named MINDPRES. The static component of MINDPRES assesses the risk of the apps installed on the MD. It uses a device-based ML model for static feature analysis and a cloud-based stochastic risk evaluator. The device-based hybrid component of MINDPRES monitors app behavior in real time. It deploys two ML models and functions as an intrusion detection and prevention system (IDPS).
The performance evaluation results of the prototype showed that the accuracy achieved by the methods for static and hybrid risk evaluation compared well with results reported in recent work. Power consumption data indicated that MINDPRES did not create an overload. This study contributes a feasible and scalable framework for building distributed systems for the protection of the data and devices of MCC customers.}, } @article {pmid39943099, year = {2025}, author = {Cabrera, VE and Bewley, J and Breunig, M and Breunig, T and Cooley, W and De Vries, A and Fourdraine, R and Giordano, JO and Gong, Y and Greenfield, R and Hu, H and Lenkaitis, A and Niu, M and Noronha, EAF and Sullivan, M}, title = {Data Integration and Analytics in the Dairy Industry: Challenges and Pathways Forward.}, journal = {Animals : an open access journal from MDPI}, volume = {15}, number = {3}, pages = {}, pmid = {39943099}, issn = {2076-2615}, support = {2023-77039-41033//National Agricultural Producers Data Cooperative (NAPDC) and the USDA/ ; }, abstract = {The dairy industry faces significant challenges in data integration and analysis, which are critical for informed decision-making, operational optimization, and sustainability. Data integration-combining data from diverse sources, such as herd management systems, sensors, and diagnostics-remains difficult due to the lack of standardization, infrastructure barriers, and proprietary concerns. This commentary explores these issues based on insights from a multidisciplinary group of stakeholders, including industry experts, researchers, and practitioners. Key challenges discussed include the absence of a national animal identification system in the US, high IT resource costs, reluctance to share data due to competitive disadvantages, and differences in global data handling practices. Proposed pathways forward include developing comprehensive data integration guidelines, enhancing farmer awareness through training programs, and fostering collaboration across industry, academia, and technology providers. Additional recommendations involve improving data exchange standards, addressing interoperability issues, and leveraging advanced technologies, such as artificial intelligence and cloud computing. Emphasis is placed on localized data integration solutions for farm-level benefits and broader research applications to advance sustainability, traceability, and profitability within the dairy supply chain. These outcomes provide a foundation for achieving streamlined data systems, enabling actionable insights, and fostering innovation in the dairy industry.}, } @article {pmid39926141, year = {2024}, author = {Bhat, SN and Jindal, GD and Nagare, GD}, title = {Development and Validation of Cloud-based Heart Rate Variability Monitor.}, journal = {Journal of medical physics}, volume = {49}, number = {4}, pages = {654-660}, pmid = {39926141}, issn = {0971-6203}, abstract = {CONTEXT: This article introduces a new cloud-based point-of-care system to monitor heart rate variability (HRV).

AIMS: Medical investigations carried out at dispensaries or hospitals impose substantial physiological and psychological stress (the white coat effect), disrupting cardiovascular homeostasis; this can be addressed by a point-of-care cloud computing system that facilitates secure patient monitoring.

SETTINGS AND DESIGN: The device employs a MAX30102 sensor to collect the peripheral pulse signal using the photoplethysmography technique. The non-invasive design ensures patient compliance while delivering critical insights into Autonomic Nervous System activity. Preliminary validations indicate the system's potential to enhance clinical outcomes by supporting timely, data-driven therapeutic adjustments based on HRV metrics.

SUBJECTS AND METHODS: This article explores the system's development, functionality, and reliability. The designed system is validated against a peripheral pulse analyzer (PPA), a research product of the Electronics Division, Bhabha Atomic Research Centre.

STATISTICAL ANALYSIS USED: The output of the developed HRV monitor (HRVM) is compared with the output of the PPA using Pearson's correlation and the Mann-Whitney U-test. Peak positions and spectrum values are validated using Pearson's correlation, mean error, standard deviation (SD) of error, and range of error. HRV parameters such as total power, mean, peak amplitude, and power in the very low frequency, low frequency, and high frequency bands are validated using the Mann-Whitney U-test.
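As an illustration of this validation recipe, a minimal SciPy sketch on toy arrays (the array names and data are hypothetical, not from the study):

```python
# Minimal sketch of the reported device-agreement analysis. The arrays stand
# in for aligned spectrum values from the developed HRVM and the reference
# PPA; the toy data and names are hypothetical.
import numpy as np
from scipy.stats import pearsonr, mannwhitneyu

rng = np.random.default_rng(0)
ppa_spectrum = rng.random(128)                            # reference device
hrvm_spectrum = ppa_spectrum + rng.normal(0, 0.01, 128)   # new device

r, _ = pearsonr(hrvm_spectrum, ppa_spectrum)
error = hrvm_spectrum - ppa_spectrum
print(f"Pearson r = {r:.3f} (the abstract reports r > 0.97)")
print(f"mean error = {error.mean():.4f}, SD = {error.std(ddof=1):.4f}, "
      f"range = [{error.min():.4f}, {error.max():.4f}]")

# Band-power style HRV parameters from the two devices would be compared
# distribution-wise with the Mann-Whitney U-test, e.g.:
u_stat, p_val = mannwhitneyu(hrvm_spectrum, ppa_spectrum)
print(f"Mann-Whitney U = {u_stat:.1f}, p = {p_val:.3f}")
```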

RESULTS: Pearson's correlation for spectrum values has been found to be more than 0.97 in all the subjects. Mean error, SD of error, and range of error are found to be within acceptable ranges.

CONCLUSIONS: Statistical results validate the new HRVM system against the PPA for use in cloud computing and point-of-care testing.}, } @article {pmid39922158, year = {2025}, author = {He, C and Zhao, Z and Zhang, X and Yu, H and Wang, R}, title = {RotInv-PCT: Rotation-Invariant Point Cloud Transformer via feature separation and aggregation.}, journal = {Neural networks : the official journal of the International Neural Network Society}, volume = {185}, number = {}, pages = {107223}, doi = {10.1016/j.neunet.2025.107223}, pmid = {39922158}, issn = {1879-2782}, mesh = {*Neural Networks, Computer ; Rotation ; Algorithms ; Humans ; Cloud Computing ; }, abstract = {The widespread use of point clouds has spurred the rapid development of neural networks for point cloud processing. A crucial property of these networks is maintaining consistent output results under random rotations of the input point cloud, namely, rotation invariance. The dominant approach to achieving rotation invariance is to construct local coordinate systems for computing invariant local point cloud coordinates. However, this method neglects the relative pose relationships between local point cloud structures, leading to a decline in network performance. To address this limitation, we propose a novel Rotation-Invariant Point Cloud Transformer (RotInv-PCT). This method extracts the local abstract shape features of the point cloud using Local Reference Frames (LRFs) and explicitly computes the spatial relative pose features between local point clouds, both of which are proven to be rotation-invariant. Furthermore, to capture the long-range pose dependencies between points, we introduce an innovative Feature Aggregation Transformer (FAT) model, which seamlessly fuses the pose features with the shape features to obtain a globally rotation-invariant representation. Moreover, to manage large-scale point clouds, we utilize hierarchical random downsampling to gradually decrease the scale of point clouds, followed by feature aggregation through FAT. To demonstrate the effectiveness of RotInv-PCT, we conducted comparative experiments across various tasks and datasets, including point cloud classification on ScanObjectNN and ModelNet40, part segmentation on ShapeNet, and semantic segmentation on S3DIS and KITTI. Thanks to our provable rotation-invariant features and FAT, our method generally outperforms state-of-the-art networks. In particular, we highlight that RotInv-PCT achieved a 2% improvement in real-world point cloud classification tasks compared to the strongest baseline. Furthermore, in the semantic segmentation task, we improved the performance on the S3DIS dataset by 10% and, for the first time, realized rotation-invariant point cloud semantic segmentation on the KITTI dataset.}, } @article {pmid39917656, year = {2024}, author = {Nantakeeratipat, T and Apisaksirikul, N and Boonrojsaree, B and Boonkijkullatat, S and Simaphichet, A}, title = {Automated machine learning for image-based detection of dental plaque on permanent teeth.}, journal = {Frontiers in dental medicine}, volume = {5}, number = {}, pages = {1507705}, pmid = {39917656}, issn = {2673-4915}, abstract = {INTRODUCTION: To detect dental plaque, manual assessment and plaque-disclosing dyes are commonly used. However, they are time-consuming and prone to human error.
This study aims to investigate the feasibility of using Google Cloud's Vertex artificial intelligence (AI) automated machine learning (AutoML) to develop a model for detecting dental plaque levels on permanent teeth using undyed photographic images.

METHODS: Photographic images of both undyed and corresponding erythrosine solution-dyed upper anterior permanent teeth from 100 dental students were captured using a smartphone camera. All photos were cropped to individual tooth images. Dyed images were analyzed to classify plaque levels based on the percentage of dyed surface area into mild (<30%), moderate (30%-60%), and heavy (>60%) categories. These true labels were used as the ground truth for undyed images. Two AutoML models, a three-class model (mild, moderate, heavy plaque) and a two-class model (acceptable vs. unacceptable plaque), were developed using undyed images in the Vertex AI environment. Both models were evaluated based on precision, recall, and F1-score.
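A hedged sketch of this kind of AutoML workflow on Vertex AI follows; the project ID, bucket paths, display names, and training budget are placeholders, not details from the study:

```python
# Hedged sketch of training an AutoML image classification model on Vertex AI,
# mirroring the METHODS above. The project ID, bucket paths, display names,
# and training budget are hypothetical placeholders, not study details.
from google.cloud import aiplatform

aiplatform.init(project="my-dental-project", location="us-central1")

# A CSV in Cloud Storage maps each undyed tooth image to its ground-truth
# plaque label (mild / moderate / heavy) derived from the dyed counterpart.
dataset = aiplatform.ImageDataset.create(
    display_name="undyed-teeth-plaque",
    gcs_source=["gs://my-bucket/plaque_labels.csv"],
    import_schema_uri=aiplatform.schema.dataset.ioformat.image.single_label_classification,
)

job = aiplatform.AutoMLImageTrainingJob(
    display_name="plaque-three-class",
    prediction_type="classification",
    multi_label=False,
)
model = job.run(
    dataset=dataset,
    model_display_name="plaque-three-class-model",
    budget_milli_node_hours=8000,  # 8 node-hours; an assumption
)
```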

RESULTS: The three-class model achieved an average precision of 0.907, with the highest precision (0.983) in the heavy plaque category. Misclassifications were more common in the mild and moderate categories. The two-class acceptable-unacceptable model demonstrated improved performance with an average precision of 0.964 and an F1-score of 0.931.
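Since the F1-score is the harmonic mean of precision and recall, the reported two-class figures imply an average recall near 0.90; a quick back-of-the-envelope check:

```python
# Back-solving recall from the reported two-class precision and F1-score:
# F1 = 2PR / (P + R)  =>  R = F1 * P / (2P - F1)
precision, f1 = 0.964, 0.931
recall = f1 * precision / (2 * precision - f1)
print(f"implied recall = {recall:.3f}")  # roughly 0.900
```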

CONCLUSION: This study demonstrated the potential of Vertex AI AutoML for non-invasive detection of dental plaque. While the two-class model showed promise for clinical use, further studies with larger datasets are recommended to enhance model generalization and real-world applicability.}, } @article {pmid39906796, year = {2025}, author = {Saadati, S and Sepahvand, A and Razzazi, M}, title = {Cloud and IoT based smart agent-driven simulation of human gait for detecting muscles disorder.}, journal = {Heliyon}, volume = {11}, number = {2}, pages = {e42119}, pmid = {39906796}, issn = {2405-8440}, abstract = {Motion disorders affect a significant portion of the global population. While some symptoms can be managed with medications, these treatments often impact all muscles uniformly, not just the affected ones, leading to potential side effects including involuntary movements, confusion, and decreased short-term memory. Currently, there is no dedicated application for differentiating healthy muscles from abnormal ones. Existing analysis applications, designed for other purposes, often lack essential software engineering features such as a user-friendly interface, infrastructure independence, usability and learning ability, cloud computing capabilities, and AI-based assistance. This research proposes a computer-based methodology to analyze human motion and differentiate between healthy and unhealthy muscles. First, an IoT-based approach is proposed to digitize human motion using smartphones instead of hard-to-access wearable sensors and markers. The motion data is then simulated to analyze the neuromusculoskeletal system. An agent-driven modeling method ensures the naturalness, accuracy, and interpretability of the simulation, incorporating neuromuscular details such as Henneman's size principle, action potentials, motor units, and biomechanical principles. The results are then provided to medical and clinical experts to aid in differentiating between healthy and unhealthy muscles and for further investigation. Additionally, a deep learning-based ensemble framework is proposed to assist in the analysis of the simulation results, offering both accuracy and interpretability. A user-friendly graphical interface enhances the application's usability. Being fully cloud-based, the application is infrastructure-independent and can be accessed on smartphones, PCs, and other devices without installation. This strategy not only addresses the current challenges in treating motion disorders but also paves the way for other clinical simulations by considering both scientific and computational requirements.}, } @article {pmid39897948, year = {2025}, author = {Papudeshi, B and Roach, MJ and Mallawaarachchi, V and Bouras, G and Grigson, SR and Giles, SK and Harker, CM and Hutton, ALK and Tarasenko, A and Inglis, LK and Vega, AA and Souza, C and Boling, L and Hajama, H and Cobián Güemes, AG and Segall, AM and Dinsdale, EA and Edwards, RA}, title = {Sphae: an automated toolkit for predicting phage therapy candidates from sequencing data.}, journal = {Bioinformatics advances}, volume = {5}, number = {1}, pages = {vbaf004}, pmid = {39897948}, issn = {2635-0041}, abstract = {MOTIVATION: Phage therapy offers a viable alternative for bacterial infections amid rising antimicrobial resistance. Its success relies on selecting safe and effective phage candidates that require comprehensive genomic screening to identify potential risks.
However, this process is often labor intensive and time-consuming, hindering rapid clinical deployment.

RESULTS: We developed Sphae, an automated bioinformatics pipeline designed to streamline the assessment of a phage's therapeutic potential in under 10 minutes. Using the Snakemake workflow manager, Sphae integrates tools for quality control, assembly, genome assessment, and annotation tailored specifically for phage biology. Sphae automates the detection of key genomic markers, including virulence factors, antimicrobial resistance genes, and lysogeny indicators such as integrase, recombinase, and transposase, which could preclude therapeutic use. Among the 65 phage sequences analyzed, 28 showed therapeutic potential, 8 failed due to low sequencing depth, 22 contained prophage or virulent markers, and 23 had multiple phage genomes. This workflow produces a report to assess phage safety and therapy suitability quickly. Sphae is scalable and portable, facilitating efficient deployment across most high-performance computing and cloud platforms, accelerating the genomic evaluation process.
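A minimal Snakefile fragment in the spirit of this description, written in Snakemake's Python-based DSL; the rule names, tools, and paths are hypothetical, not Sphae's actual workflow (see the repository below for that):

```
# Illustrative Snakefile showing how a Snakemake-managed phage pipeline can
# chain QC -> assembly -> annotation. Rule names, tools, and paths are
# hypothetical, not Sphae's actual rules.
SAMPLES = ["phage1", "phage2"]

rule all:
    input:
        expand("report/{sample}.summary.txt", sample=SAMPLES)

rule qc:
    input:
        "reads/{sample}.fastq.gz"
    output:
        "qc/{sample}.trimmed.fastq.gz"
    shell:
        "fastp -i {input} -o {output}"

rule assemble:
    input:
        "qc/{sample}.trimmed.fastq.gz"
    output:
        "assembly/{sample}.fasta"
    shell:
        "megahit -r {input} -o assembly_{wildcards.sample} && "
        "cp assembly_{wildcards.sample}/final.contigs.fa {output}"

rule annotate:
    input:
        "assembly/{sample}.fasta"
    output:
        "report/{sample}.summary.txt"
    # A hypothetical screening step for integrase, recombinase, transposase,
    # AMR, and virulence genes before a phage is shortlisted for therapy.
    shell:
        "phage_screen {input} > {output}"
```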

Sphae source code is freely available at https://github.com/linsalrob/sphae, with installation supported via Conda, PyPI, and Docker containers.}, } @article {pmid39896367, year = {2024}, author = {Bensaid, R and Labraoui, N and Abba Ari, AA and Saidi, H and Mboussam Emati, JH and Maglaras, L}, title = {SA-FLIDS: secure and authenticated federated learning-based intelligent network intrusion detection system for smart healthcare.}, journal = {PeerJ. Computer science}, volume = {10}, number = {}, pages = {e2414}, pmid = {39896367}, issn = {2376-5992}, abstract = {Smart healthcare systems are gaining increased practicality and utility, driven by continuous advancements in artificial intelligence technologies, cloud and fog computing, and the Internet of Things (IoT). However, despite these transformative developments, challenges persist within IoT devices, encompassing computational constraints, storage limitations, and attack vulnerability. These attacks target sensitive health information, compromise data integrity, and pose obstacles to the overall resilience of the healthcare sector. To address these vulnerabilities, Network-based Intrusion Detection Systems (NIDSs) are crucial in fortifying smart healthcare networks and ensuring secure use of IoMT-based applications by mitigating security risks. Thus, this article proposes a novel Secure and Authenticated Federated Learning-based NIDS framework using Blockchain (SA-FLIDS) for fog-IoMT-enabled smart healthcare systems. Our research aims to improve data privacy and reduce communication costs. Furthermore, we also address weaknesses in decentralized learning systems, like Sybil and Model Poisoning attacks. We leverage the blockchain-based Self-Sovereign Identity (SSI) model to handle client authentication and secure communication. Additionally, we use the Trimmed Mean method to aggregate data. This helps reduce the effect of unusual or malicious inputs when creating the overall model. Our approach is evaluated on real IoT traffic datasets such as CICIoT2023 and EdgeIIoTset. It demonstrates exceptional robustness against adversarial attacks. These findings underscore the potential of our technique to improve the security of IoMT-based healthcare applications.}, } @article {pmid39896264, year = {2025}, author = {Hoang, TH and Fuhrman, J and Klarqvist, M and Li, M and Chaturvedi, P and Li, Z and Kim, K and Ryu, M and Chard, R and Huerta, EA and Giger, M and Madduri, R}, title = {Enabling end-to-end secure federated learning in biomedical research on heterogeneous computing environments with APPFLx.}, journal = {Computational and structural biotechnology journal}, volume = {28}, number = {}, pages = {29-39}, pmid = {39896264}, issn = {2001-0370}, abstract = {Facilitating large-scale, cross-institutional collaboration in biomedical machine learning (ML) projects requires a trustworthy and resilient federated learning (FL) environment to ensure that sensitive information such as protected health information is kept confidential. Specifically designed for this purpose, this work introduces APPFLx - a low-code, easy-to-use FL framework that enables easy setup, configuration, and running of FL experiments. APPFLx removes administrative boundaries of research organizations and healthcare systems while providing secure end-to-end communication, privacy-preserving functionality, and identity management.
Furthermore, it is completely agnostic to the underlying computational infrastructure of participating clients, allowing an instantaneous deployment of this framework into existing computing infrastructures. Experimentally, the utility of APPFLx is demonstrated in two case studies: (1) predicting participant age from electrocardiogram (ECG) waveforms, and (2) detecting COVID-19 disease from chest radiographs. Here, ML models were securely trained across heterogeneous computing resources, including a combination of on-premise high-performance computing and cloud computing facilities. By securely unlocking data from multiple sources for training without directly sharing it, these FL models enhance generalizability and performance compared to centralized training models while ensuring data remains protected. In conclusion, APPFLx demonstrated itself as an easy-to-use framework for accelerating biomedical studies across organizations and healthcare systems on large datasets while maintaining the protection of private medical data.}, } @article {pmid39896025, year = {2025}, author = {Zheng, X and Weng, Z}, title = {Design of an enhanced feature point matching algorithm utilizing 3D laser scanning technology for sculpture design.}, journal = {PeerJ. Computer science}, volume = {11}, number = {}, pages = {e2628}, pmid = {39896025}, issn = {2376-5992}, abstract = {As the aesthetic appreciation for art continues to grow, there is an increased demand for precision and detailed control in sculptural works. The advent of 3D laser scanning technology introduces transformative new tools and methodologies for refining correction systems in sculpture design. This article proposes a feature point matching algorithm based on fragment measurement and the iterative closest point (ICP) methodology, leveraging 3D laser scanning technology, namely Fragment Measurement Iterative Closest Point Feature Point Matching (FM-ICP-FPM). The FM-ICP-FPM approach uses the overlapping area of the two sculpture perspectives as a reference for attaching feature points. It employs the 3D measurement system to capture physical point cloud data from the two surfaces to enable the initial alignment of feature points. Feature vectors are generated by segmenting the region around the feature points and computing the intra-block gradient histogram. Subsequently, distance threshold conditions are set based on the constructed feature vectors and the preliminary feature point matches established during the coarse alignment to achieve precise feature point matching. Experimental results demonstrate the exceptional performance of the FM-ICP-FPM algorithm at a sampling interval of 200: the correct matching rate reaches 100%, the mean translation error (MTE) is 154 mm, and the mean rotation angle error (MRAE) is 0.065 degrees. These indicators represent the degree of deviation in translation and rotation of the registered model, respectively. These low error values demonstrate that the FM-ICP-FPM algorithm excels in registration accuracy and can generate highly consistent three-dimensional models.}, } @article {pmid39896013, year = {2025}, author = {Alrowais, F and Arasi, MA and Alotaibi, SS and Alonazi, M and Marzouk, R and Salama, AS}, title = {Deep gradient reinforcement learning for music improvisation in cloud computing framework.}, journal = {PeerJ. Computer science}, volume = {11}, number = {}, pages = {e2265}, pmid = {39896013}, issn = {2376-5992}, abstract = {Artificial intelligence (AI) in music improvisation offers promising new avenues for developing human creativity. The difficulty of writing dynamic, flexible musical compositions in real time is discussed in this article. We explore using reinforcement learning (RL) techniques to create more interactive and responsive music creation systems. Here, the musical structures train an RL agent to navigate the complex space of musical possibilities to provide improvisations. The melodic framework in the input musical data is initially identified using bi-directional gated recurrent units. The musical concepts such as notes, chords, and rhythms from the recognised framework are transformed into a format suitable for RL input. The deep gradient-based reinforcement learning technique used in this research formulates a reward system that directs the agent to compose aesthetically intriguing and harmonically cohesive musical improvisations. The improvised music is further rendered in the MIDI format. The Bach Chorales dataset with six different attributes relevant to musical compositions is employed in implementing the present research. The model was set up in a containerised cloud environment and controlled for smooth load distribution. Five parameters, namely pitch frequency (PF), standard pitch delay (SPD), average distance between peaks (ADP), note duration gradient (NDG), and pitch class gradient (PCG), are leveraged to assess the quality of the improvised music. The proposed model obtains a PF of +0.15, an SPD of -0.43, an ADP of -0.07, and an NDG of 0.0041, values that improve on other improvisation methods.}, } @article {pmid39886429, year = {2025}, author = {Gadde, RSK and Devaguptam, S and Ren, F and Mittal, R and Dong, L and Wang, Y and Liu, F}, title = {Chatbot-assisted quantum chemistry for explicitly solvated molecules.}, journal = {Chemical science}, volume = {16}, number = {9}, pages = {3852-3864}, pmid = {39886429}, issn = {2041-6520}, abstract = {Advanced computational chemistry software packages have transformed chemical research by leveraging quantum chemistry and molecular simulations. Despite their capabilities, the complicated design and the requirement for specialized computing hardware hinder their applications in the broad chemistry community. Here, we introduce AutoSolvateWeb, a chatbot-assisted computational platform that addresses both challenges simultaneously. This platform employs a user-friendly chatbot interface to guide non-experts through a multistep procedure involving various computational packages, enabling them to configure and execute complex quantum mechanical/molecular mechanical (QM/MM) simulations of explicitly solvated molecules. Moreover, this platform operates on cloud infrastructure, allowing researchers to run simulations without hardware configuration challenges.
As a proof of concept, AutoSolvateWeb demonstrates that combining virtual agents with cloud computing can democratize access to sophisticated computational research tools.}, } @article {pmid39875513, year = {2025}, author = {Rateb, R and Hadi, AA and Tamanampudi, VM and Abualigah, L and Ezugwu, AE and Alzahrani, AI and Alblehai, F and Jia, H}, title = {An optimal workflow scheduling in IoT-fog-cloud system for minimizing time and energy.}, journal = {Scientific reports}, volume = {15}, number = {1}, pages = {3607}, pmid = {39875513}, issn = {2045-2322}, abstract = {Today, with the increasing worldwide use of the Internet of Things (IoT), ever more workflows need to be stored and processed on computing platforms. This increases costs for computing resource providers and drives up system Energy Consumption (EC). This paper therefore examines the workflow scheduling problem for IoT devices in the fog-cloud environment, with reducing the EC of the computing system and the MakeSpan Time (MST) of workflows as the main objectives, under the constraints of priority, deadline, and reliability. To achieve these objectives, a combination of the Aquila and Salp Swarm Algorithms (ASSA) is used to select the best Virtual Machines (VMs) for the execution of workflows. In each iteration of ASSA execution, a number of VMs are selected by the ASSA. Then, using the Reducing MakeSpan Time (RMST) technique, the MST of the workflow on the selected VMs is reduced while maintaining reliability and meeting deadlines. Finally, applying VM merging and the Dynamic Voltage Frequency Scaling (DVFS) technique to the RMST output reduces the static and dynamic EC, respectively. Experimental results show the effectiveness of the proposed method compared to previous methods.}, } @article {pmid39874935, year = {2025}, author = {Bai, Y and Zhao, H and Shi, X and Chen, L}, title = {Towards practical and privacy-preserving CNN inference service for cloud-based medical imaging analysis: A homomorphic encryption-based approach.}, journal = {Computer methods and programs in biomedicine}, volume = {261}, number = {}, pages = {108599}, doi = {10.1016/j.cmpb.2025.108599}, pmid = {39874935}, issn = {1872-7565}, mesh = {*Cloud Computing ; *Neural Networks, Computer ; *Computer Security ; Humans ; *Diagnostic Imaging ; Privacy ; Algorithms ; Image Processing, Computer-Assisted/methods ; Deep Learning ; }, abstract = {BACKGROUND AND OBJECTIVE: Cloud-based Deep Learning as a Service (DLaaS) has transformed biomedicine by enabling healthcare systems to harness the power of deep learning for biomedical data analysis. However, privacy concerns emerge when sensitive user data must be transmitted to untrusted cloud servers. Existing privacy-preserving solutions are hindered by significant latency issues, stemming from the computational complexity of inner product operations in convolutional layers and the high communication costs of evaluating nonlinear activation functions. These limitations make current solutions impractical for real-world applications.

METHODS: In this paper, we address the challenges in mobile cloud-based medical imaging analysis, where users aim to classify private body-related radiological images using a Convolutional Neural Network (CNN) model hosted on a cloud server while ensuring data privacy for both parties. We propose PPCNN, a practical and privacy-preserving framework for CNN inference. It introduces a novel mixed protocol that combines a low-expansion homomorphic encryption scheme with a noise-based masking method. Our framework is designed based on three key ideas: (1) optimizing computation costs by shifting unnecessary and expensive homomorphic multiplication operations to the offline phase, (2) introducing a coefficient-aware packing method to enable efficient homomorphic operations during the linear layer of the CNN, and (3) employing data masking techniques for nonlinear operations of the CNN to reduce communication costs.
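A toy numpy illustration of idea (3) and its interaction with idea (1); real protocols mask over finite rings and interleave masking with the homomorphic layers, so this sketch only shows why additive masks hide intermediate values while commuting with linear operations:

```python
# Toy sketch of noise-based masking for a nonlinear layer (idea 3) and why
# linear algebra commutes with additive masks, which is what lets the costly
# homomorphic multiplications move to an offline phase (idea 1). Real schemes
# work over finite rings with HE ciphertexts; this is only the intuition.
import numpy as np

rng = np.random.default_rng(42)
x = np.array([0.7, -1.2, 2.3])   # secret intermediate activations
r = rng.normal(size=x.shape)     # one-time random mask

masked = x + r                   # what the other party sees: x stays hidden
recovered = masked - r           # the mask holder can remove the mask later
assert np.allclose(recovered, x)

# Linear layers commute with additive masks: W(x + r) - Wr == Wx.
W = rng.normal(size=(2, 3))
assert np.allclose(W @ masked - W @ r, W @ x)
```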

RESULTS: We implemented PPCNN and evaluated its performance on three real-world radiological image datasets. Experimental results show that PPCNN outperforms state-of-the-art methods in mobile cloud scenarios, achieving superior response times and lower usage costs.

CONCLUSIONS: This study introduces an efficient and privacy-preserving framework for cloud-based medical imaging analysis, marking a significant step towards practical, secure, and trustworthy AI-driven healthcare solutions.}, } @article {pmid39874565, year = {2025}, author = {Oh, S and Lee, S}, title = {Rehabilomics Strategies Enabled by Cloud-Based Rehabilitation: Scoping Review.}, journal = {Journal of medical Internet research}, volume = {27}, number = {}, pages = {e54790}, pmid = {39874565}, issn = {1438-8871}, mesh = {*Cloud Computing ; Humans ; Rehabilitation/methods ; Proteomics/methods ; Genomics/methods ; Precision Medicine/methods ; Metabolomics/methods ; }, abstract = {BACKGROUND: Rehabilomics, or the integration of rehabilitation with genomics, proteomics, metabolomics, and other "-omics" fields, aims to promote personalized approaches to rehabilitation care. Cloud-based rehabilitation offers streamlined patient data management and sharing and could potentially play a significant role in advancing rehabilomics research. This study explored the current status and potential benefits of implementing rehabilomics strategies through cloud-based rehabilitation.

OBJECTIVE: This scoping review aimed to investigate the implementation of rehabilomics strategies through cloud-based rehabilitation and summarize the current state of knowledge within the research domain. This analysis aims to understand the impact of cloud platforms on the field of rehabilomics and provide insights into future research directions.

METHODS: In this scoping review, we systematically searched major academic databases, including CINAHL, Embase, Google Scholar, PubMed, MEDLINE, ScienceDirect, Scopus, and Web of Science, to identify relevant studies and applied predefined inclusion criteria to select appropriate studies. Subsequently, we analyzed 28 selected papers to identify trends and insights regarding cloud-based rehabilitation and rehabilomics within this study's landscape.

RESULTS: This study reports the various applications and outcomes of implementing rehabilomics strategies through cloud-based rehabilitation. In particular, a comprehensive analysis was conducted on 28 studies, including 16 (57%) focused on personalized rehabilitation and 12 (43%) on data security and privacy. The distribution of articles among the 28 studies based on specific keywords included 3 (11%) on the cloud, 4 (14%) on platforms, 4 (14%) on hospitals and rehabilitation centers, 5 (18%) on telehealth, 5 (18%) on home and community, and 7 (25%) on disease and disability. Cloud platforms offer new possibilities for data sharing and collaboration in rehabilomics research, underpinning a patient-centered approach and enhancing the development of personalized therapeutic strategies.

CONCLUSIONS: This scoping review highlights the potential significance of cloud-based rehabilomics strategies in the field of rehabilitation. The use of cloud platforms is expected to strengthen patient-centered data management and collaboration, contributing to the advancement of innovative strategies and therapeutic developments in rehabilomics.}, } @article {pmid39866891, year = {2025}, author = {Roth, I and Cohen, O}, title = {The use of an automatic remote weight management system to track treatment response, identified drugs supply shortage and its consequences: A pilot study.}, journal = {Digital health}, volume = {11}, number = {}, pages = {20552076251314090}, pmid = {39866891}, issn = {2055-2076}, abstract = {OBJECTIVE: The objective of this pilot study is to evaluate the feasibility of using an automatic weight management system to follow patients' response to weight reduction medications and to identify early deviations from weight trajectories.

METHODS: The pilot study involved 11 participants using Semaglutide for weight management, monitored over a 12-month period. A cloud-based, Wi-Fi-enabled remote weight management system collected and analyzed daily weight data from smart scales. The system's performance was evaluated during a period marked by a Semaglutide supply shortage.

RESULTS: Participants achieved a cumulative weight loss of 85 kg until a supply shortage-induced trough in October 2022. This was followed by a 6-8 week plateau and a subsequent 13 kg cumulative weight gain. The study demonstrated the feasibility of digitally monitoring weight without attrition over 12 months and highlighted the impact of anti-obesity drug (AOD) supply constraints on weight trajectories.
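A minimal pandas sketch of this kind of trajectory monitoring, assuming daily smart-scale readings; the data, window length, and threshold are assumptions, not the study's algorithm:

```python
# Minimal sketch of flagging deviations from an expected weight trajectory
# using daily smart-scale data. The toy series, 7-day window, and zero-change
# threshold are assumptions for illustration only.
import numpy as np
import pandas as pd

dates = pd.date_range("2022-08-01", periods=90, freq="D")
weight = pd.Series(95.0 - 0.05 * np.arange(90), index=dates)  # steady loss
weight.iloc[60:] += 0.08 * np.arange(30)                      # plateau, then regain

trend = weight.rolling(window=7).mean()   # smooth out daily noise
weekly_change = trend.diff(7)             # kg change per week

# Flag weeks where the expected loss stalls or reverses, as seen during
# the supply shortage described above.
deviations = weekly_change[weekly_change >= 0]
print(deviations.round(2).head())
```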

CONCLUSIONS: The remote weight management system proved important for improving clinic efficacy and identifying trends impacting obesity outcomes through electronic data monitoring. The system's potential in increasing medication compliance and enhancing overall clinical outcomes warrants further research, particularly in light of the challenges posed by AOD supply fluctuations.}, } @article {pmid39864359, year = {2025}, author = {Fang, C and Song, K and Yan, Z and Liu, G}, title = {Monitoring phycocyanin in global inland waters by remote sensing: Progress and future developments.}, journal = {Water research}, volume = {275}, number = {}, pages = {123176}, doi = {10.1016/j.watres.2025.123176}, pmid = {39864359}, issn = {1879-2448}, mesh = {*Phycocyanin/analysis ; *Remote Sensing Technology ; *Environmental Monitoring/methods ; Cyanobacteria ; Lakes ; }, abstract = {Cyanobacterial blooms are increasingly becoming major threats to global inland aquatic ecosystems. Phycocyanin (PC), a pigment unique to cyanobacteria, can provide an important reference for early warning of cyanobacterial blooms. New satellite technology and cloud computing platforms have greatly improved research on PC, with the average number of studies examining it having increased from 5 per year before 2018 to 17 per year thereafter. Many empirical, semi-empirical, semi-analytical, quasi-analytical algorithm (QAA), and machine learning (ML) algorithms have been developed based on the unique absorption characteristics of PC at approximately 620 nm. However, most models have been developed for individual lakes or clusters of them in specific regions, and their applicability at greater spatial scales requires evaluation. A review is needed of optical mechanisms, of the principles and the advantages and disadvantages of different model types, of the performance advantages and disadvantages of mainstream sensors in PC remote sensing inversion, and of global lacustrine PC datasets. We examine 230 articles from the Web of Science citation database between 1900 and 2024, summarize 57 of them that deal with construction of PC inversion models, and compile a list of 6526 PC sampling sites worldwide. This review proposes that the key to achieving global lacustrine PC remote sensing inversion and spatiotemporal evolution analysis is to make full use of existing multi-source remote sensing big data platforms and to deeply combine ML with optical mechanisms, classifying the target lakes in advance based on lake optical characteristics, eutrophication level, water depth, climate type, altitude, and population density within the watershed. Additionally, integrating data from multi-source satellite sensors, ground-based observations, and unmanned aerial vehicles will enable future development of global lacustrine PC remote estimation, and contribute to achieving United Nations Sustainable Development Goals inland water goals.}, } @article {pmid39860907, year = {2025}, author = {Mennilli, R and Mazza, L and Mura, A}, title = {Integrating Machine Learning for Predictive Maintenance on Resource-Constrained PLCs: A Feasibility Study.}, journal = {Sensors (Basel, Switzerland)}, volume = {25}, number = {2}, pages = {}, pmid = {39860907}, issn = {1424-8220}, abstract = {This study investigates the potential of deploying a neural network model on an advanced programmable logic controller (PLC), specifically the Finder Opta™, for real-time inference within the predictive maintenance framework.
In the context of Industry 4.0, edge computing aims to process data directly on local devices rather than relying on a cloud infrastructure. This approach minimizes latency, enhances data security, and reduces the bandwidth required for data transmission, making it ideal for industrial applications that demand immediate response times. Despite the limited memory and processing power inherent to many edge devices, this proof-of-concept demonstrates the suitability of the Finder Opta™ for such applications. Using acoustic data, a convolutional neural network (CNN) is deployed to infer the rotational speed of a mechanical test bench. The findings underscore the potential of the Finder Opta™ to support scalable and efficient predictive maintenance solutions, laying the groundwork for future research in real-time anomaly detection. By enabling machine learning capabilities on compact, resource-constrained hardware, this approach promises a cost-effective, adaptable solution for diverse industrial environments.}, } @article {pmid39860904, year = {2025}, author = {Gu, X and Duan, Z and Ye, G and Chang, Z}, title = {Virtual Node-Driven Cloud-Edge Collaborative Resource Scheduling for Surveillance with Visual Sensors.}, journal = {Sensors (Basel, Switzerland)}, volume = {25}, number = {2}, pages = {}, pmid = {39860904}, issn = {1424-8220}, support = {2022B01008-1//Key R&D Program of Xinjiang Uygur Autonomous Region/ ; ICL20230201//Open Fund of Intelligent Control Laboratory/ ; }, abstract = {For public security purposes, distributed surveillance systems are widely deployed in key areas. These systems comprise visual sensors, edge computing boxes, and cloud servers. Resource scheduling algorithms are critical to ensure such systems' robustness and efficiency. They balance workloads and need to meet real-time monitoring and emergency response requirements. Existing works have primarily focused on optimizing Quality of Service (QoS), latency, and energy consumption in edge computing under resource constraints. However, the issue of task congestion due to insufficient physical resources has been rarely investigated. In this paper, we tackle the challenges posed by large workloads and limited resources in the context of surveillance with visual sensors. First, we introduce the concept of virtual nodes for managing resource shortages, referred to as virtual node-driven resource scheduling. Then, we propose a convex-objective integer linear programming (ILP) model based on this concept and demonstrate its efficiency. Additionally, we propose three alternative virtual node-driven scheduling algorithms: an extension of a random algorithm, a genetic algorithm, and a heuristic algorithm. These algorithms serve as benchmarks for comparison with the proposed ILP model. Experimental results show that all the scheduling algorithms can effectively address the challenge of offloading multiple priority tasks under resource constraints.
Furthermore, the ILP model shows the best scheduling performance among them.}, } @article {pmid39856226, year = {2025}, author = {Alsahfi, T and Badshah, A and Aboulola, OI and Daud, A}, title = {Optimizing healthcare big data performance through regional computing.}, journal = {Scientific reports}, volume = {15}, number = {1}, pages = {3129}, pmid = {39856226}, issn = {2045-2322}, support = {UJ-24-DR-446-1//University of Jeddah/ ; }, mesh = {*Big Data ; Humans ; *Cloud Computing ; *Electronic Health Records ; *Delivery of Health Care ; Internet of Things ; }, abstract = {The healthcare sector is experiencing a digital transformation propelled by the Internet of Medical Things (IoMT), real-time patient monitoring, robotic surgery, Electronic Health Records (EHR), medical imaging, and wearable technologies. This proliferation of digital tools generates vast quantities of healthcare data. Efficient and timely analysis of this data is critical for enhancing patient outcomes and optimizing care delivery. Real-time processing of Healthcare Big Data (HBD) offers significant potential for improved diagnostics, continuous monitoring, and effective surgical interventions. However, conventional cloud-based processing systems face challenges due to the sheer volume and time-sensitive nature of this data. The migration of large datasets to centralized cloud infrastructures often results in latency, which impedes real-time applications. Furthermore, network congestion exacerbates these challenges, delaying access to vital insights necessary for informed decision-making. Such limitations hinder healthcare professionals from fully leveraging the capabilities of emerging technologies and big data analytics. To mitigate these issues, this paper proposes a Regional Computing (RC) paradigm for the management of HBD. The RC framework establishes strategically positioned regional servers capable of regionally collecting, processing, and storing medical data, thereby reducing dependence on centralized cloud resources, especially during peak usage periods. This innovative approach effectively addresses the constraints of traditional cloud processing, facilitating real-time data analysis at the regional level. Ultimately, it empowers healthcare providers with the timely information required to deliver data-driven, personalized care and optimize treatment strategies.}, } @article {pmid39851627, year = {2024}, author = {Tang, Y and Guo, M and Li, B and Geng, K and Yu, J and Qin, B}, title = {Flexible Threshold Quantum Homomorphic Encryption on Quantum Networks.}, journal = {Entropy (Basel, Switzerland)}, volume = {27}, number = {1}, pages = {}, pmid = {39851627}, issn = {1099-4300}, support = {62472144//National Natural Science Foundation of China/ ; ICNS202006//Shaanxi Key Laboratory of Information Communication Network and Security, Xi'an University of Posts & Telecommunications, Xi'an, Shaanxi 710121, China/ ; SKLACSS-202401//Open Fund of Advanced Cryptography and System Security Key Laboratory of Sichuan Province/ ; 2024NSFSC0515//Sichuan Science and Technology Program/ ; }, abstract = {Currently, most quantum homomorphic encryption (QHE) schemes only allow a single evaluator (server) to accomplish computation tasks on encrypted data shared by the data owner (user). In addition, the quantum computing capability of the evaluator and the scope of quantum computation it can perform are usually somewhat limited, which significantly reduces the flexibility of the scheme in quantum network environments.
In this paper, we propose a novel (t,n)-threshold QHE (TQHE) network scheme based on the Shamir secret sharing protocol, which allows k (t ≤ k ≤ n) evaluators to collaboratively perform evaluation computation operations on each qubit within the shared encrypted sequence. Moreover, each evaluator, while possessing the ability to perform all single-qubit unitary operations, is able to perform any single-qubit gate computation task assigned by the data owner. We give a specific (3, 5)-threshold example, illustrating the scheme's correctness and feasibility, and simulate it on the IBM quantum computing cloud platform. Finally, it is shown that the scheme is secure by analyzing the encryption/decryption private keys, the ciphertext quantum state sequences during transmission, the plaintext quantum state sequence, and the result of computations on the plaintext quantum state sequence.}, } @article {pmid39851169, year = {2025}, author = {Kwon, K and Lee, YJ and Chung, S and Lee, J and Na, Y and Kwon, Y and Shin, B and Bateman, A and Lee, J and Guess, M and Sohn, JW and Lee, J and Yeo, WH}, title = {Full Body-Worn Textile-Integrated Nanomaterials and Soft Electronics for Real-Time Continuous Motion Recognition Using Cloud Computing.}, journal = {ACS applied materials & interfaces}, volume = {17}, number = {5}, pages = {7977-7988}, pmid = {39851169}, issn = {1944-8252}, mesh = {Humans ; *Textiles ; *Wearable Electronic Devices ; *Nanostructures/chemistry ; *Cloud Computing ; Wireless Technology/instrumentation ; Electronics ; Graphite/chemistry ; Movement/physiology ; Monitoring, Ambulatory/instrumentation/methods ; }, abstract = {Recognizing human body motions opens possibilities for real-time observation of users' daily activities, revolutionizing continuous human healthcare and rehabilitation. While some wearable sensors show their capabilities in detecting movements, no prior work could detect full-body motions with wireless devices. Here, we introduce a soft electronic textile-integrated system, including nanomaterials and flexible sensors, which enables real-time detection of various full-body movements using the combination of a wireless sensor suit and deep-learning-based cloud computing. This system includes an array of nanomembrane laser-induced graphene strain sensors and flexible electronics integrated with textiles for wireless detection of different body motions and workouts. With multiple human subjects, we demonstrate the system's performance in real-time prediction of eight different activities, including resting, walking, running, squatting, walking upstairs, walking downstairs, push-ups, and jump roping, with an accuracy of 95.3%.
This class of technologies, integrated as full body-worn textile electronics that pair interactively with smartwatches and portable devices, can be used in real-world applications such as ambulatory health monitoring and feedback-enabled customized rehabilitation workouts.}, } @article {pmid39848197, year = {2025}, author = {Novais, JJM and Melo, BMD and Neves Junior, AF and Lima, RHC and de Souza, RE and Melo, VF and do Amaral, EF and Tziolas, N and Demattê, JAM}, title = {Online analysis of Amazon's soils through reflectance spectroscopy and cloud computing can support policies and the sustainable development.}, journal = {Journal of environmental management}, volume = {375}, number = {}, pages = {124155}, doi = {10.1016/j.jenvman.2025.124155}, pmid = {39848197}, issn = {1095-8630}, mesh = {*Soil/chemistry ; *Cloud Computing ; *Sustainable Development ; Spectrum Analysis/methods ; Brazil ; Carbon/analysis ; }, abstract = {Analyzing soil in large and remote areas such as the Amazon River Basin (ARB) is unviable when it is entirely performed by wet labs using traditional methods due to the scarcity of labs and the significant workforce requirements, increasing costs, time, and waste. Remote sensing, combined with cloud computing, enhances soil analysis by modeling soil from spectral data and overcoming the limitations of traditional methods. We verified the potential of soil spectroscopy in conjunction with cloud-based computing to predict soil organic carbon (SOC) and particle size (sand, silt, and clay) content from the Amazon region. To this end, we requested physicochemical attribute values determined by wet laboratory analyses of 211 soil samples from the ARB. These samples were subjected to Vis-NIR-SWIR spectroscopy in the laboratory. Two approaches modeled the soil attributes: (M-I) a cloud-computing-based approach using the Brazilian Soil Spectral Service (BraSpecS) platform, and (M-II) an offline computing approach using the R programming language. Both methods used the Cubist machine learning algorithm for modeling. The coefficient of determination (R[2]), mean absolute error (MAE), and root mean squared error (RMSE) served as criteria for performance assessment. Soil attribute predictions were highly consistent between measured values and those predicted by both approaches, M-I and M-II. M-II outperformed M-I in predicting both particle size and SOC. For clay content, the offline model achieved an R[2] of 0.85, with an MAE of 86.16 g kg[-][1] and RMSE of 111.73 g kg[-][1], while the online model had an R[2] of 0.70, MAE of 111.73 g kg[-][1], and RMSE of 144.19 g kg[-][1]. For SOC, the offline model also showed better performance, with an R[2] of 0.81, MAE of 3.42 g kg[-][1], and RMSE of 4.57 g kg[-][1], compared to an R[2] of 0.72, MAE of 3.66 g kg[-][1], and RMSE of 5.53 g kg[-][1] for M-I. Both modeling methods demonstrated the power of reflectance spectroscopy and cloud computing to survey soils in remote and large areas such as the ARB.
The synergetic use of these techniques can support policies and sustainable development.}, } @article {pmid39847768, year = {2025}, author = {Seth, M and Jalo, H and Högstedt, Å and Medin, O and Sjöqvist, BA and Candefjord, S}, title = {Technologies for Interoperable Internet of Medical Things Platforms to Manage Medical Emergencies in Home and Prehospital Care: Scoping Review.}, journal = {Journal of medical Internet research}, volume = {27}, number = {}, pages = {e54470}, pmid = {39847768}, issn = {1438-8871}, mesh = {Humans ; *Emergency Medical Services/methods ; *Home Care Services ; *Internet of Things ; Telemedicine ; }, abstract = {BACKGROUND: The aging global population and the rising prevalence of chronic disease and multimorbidity have strained health care systems, driving the need for expanded health care resources. Transitioning to home-based care (HBC) may offer a sustainable solution, supported by technological innovations such as Internet of Medical Things (IoMT) platforms. However, the full potential of IoMT platforms to streamline health care delivery is often limited by interoperability challenges that hinder communication and pose risks to patient safety. Gaining more knowledge about addressing higher levels of interoperability issues is essential to unlock the full potential of IoMT platforms.

OBJECTIVE: This scoping review aims to summarize best practices and technologies to overcome interoperability issues in IoMT platform development for prehospital care and HBC.

METHODS: This review adheres to a protocol published in 2022. Our literature search followed a dual search strategy and was conducted up to August 2023 across 6 electronic databases: IEEE Xplore, PubMed, Scopus, ACM Digital Library, Sage Journals, and ScienceDirect. After the title, abstract, and full-text screening performed by 2 reviewers, 158 articles were selected for inclusion. To answer our 2 research questions, we used 2 models defined in the protocol: a 6-level interoperability model and a 5-level IoMT reference model. Data extraction and synthesis were conducted through thematic analysis using Dedoose. The findings, including commonly used technologies and standards, are presented through narrative descriptions and graphical representations.

RESULTS: The primary technologies and standards reported for interoperable IoMT platforms in prehospital care and HBC included cloud computing (19/30, 63%), representational state transfer application programming interfaces (REST APIs; 17/30, 57%), Wi-Fi (17/30, 57%), gateways (15/30, 50%), and JSON (14/30, 47%). Message queuing telemetry transport (MQTT; 7/30, 23%) and WebSocket (7/30, 23%) were commonly used for real-time emergency alerts, while fog and edge computing were often combined with cloud computing for enhanced processing power and reduced latencies. By contrast, technologies associated with higher interoperability levels, such as blockchain (2/30, 7%), Kubernetes (3/30, 10%), and openEHR (2/30, 7%), were less frequently reported, indicating a focus on lower level of interoperability in most of the included studies (17/30, 57%).
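A small sketch of the MQTT alerting pattern these platforms rely on, assuming the paho-mqtt 1.x client API and hypothetical broker, topic, and payload names:

```python
# Sketch of publishing a real-time emergency alert over MQTT, the role MQTT
# plays in the reviewed IoMT platforms. The broker host, topic, and payload
# fields are hypothetical; the API shown is paho-mqtt 1.x.
import json
import paho.mqtt.client as mqtt

client = mqtt.Client()
client.connect("broker.example.org", 1883)  # hypothetical broker
client.loop_start()                         # background network loop

alert = {
    "patient_id": "demo-001",               # hypothetical payload fields
    "event": "fall_detected",
    "spo2": 84,
    "ts": "2025-01-01T12:00:00Z",
}
# QoS 1 gives at-least-once delivery, a common choice for emergency alerts.
info = client.publish("homecare/alerts/demo-001", json.dumps(alert), qos=1)
info.wait_for_publish()
client.loop_stop()
client.disconnect()
```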

CONCLUSIONS: IoMT platforms that support higher levels of interoperability have the potential to deliver personalized patient care, enhance overall patient experience, enable early disease detection, and minimize time delays. However, our findings highlight a prevailing emphasis on lower levels of interoperability within the IoMT research community. While blockchain, microservices, Docker, and openEHR are described as suitable solutions in the literature, these technologies seem to be seldom used in IoMT platforms for prehospital care and HBC. Recognizing the evident benefit of cross-domain interoperability, we advocate a stronger focus on collaborative initiatives and technologies to achieve higher levels of interoperability.

RR2-10.2196/40243.}, } @article {pmid39833217, year = {2025}, author = {Ali, A and Hussain, B and Hissan, RU and Al Aiban, KM and Radulescu, M and Magazzino, C}, title = {Examining the landscape transformation and temperature dynamics in Pakistan.}, journal = {Scientific reports}, volume = {15}, number = {1}, pages = {2575}, pmid = {39833217}, issn = {2045-2322}, support = {RSPD2024R742//King Saud University/ ; }, abstract = {This study aims to examine the landscape transformation and temperature dynamics using multiple spectral indices. Temporal fluctuations in land surface temperature are strongly related to the morphological features of the area in which the temperature is determined, and these factors significantly affect the thermal properties of the surface. This research was conducted in the Mardan division of Khyber Pakhtunkhwa, Pakistan, to identify vegetation cover, water bodies, impervious surfaces, and land surface temperature using decadal remote sensing data at four intervals during 1993-2023. To analyze the landscape transformation and temperature dynamics, the study used spectral indices including Land Surface Temperature, Normalized Difference Vegetation Index, Normalized Difference Water Index, Normalized Difference Built-up Index, and Normalized Difference Bareness Index by employing the Google Earth Engine cloud computing platform. The results reveal substantial variation in land surface temperature, ranging from 15.58 °C to 43.71 °C over the study period. Larger fluctuations in land surface temperature were found in the cover and protective forests of the study area, especially in its northwestern and southeastern parts. These results highlight the complexity of the relationship between land surface temperature and spectral indices, underscoring the need for multi-index analyses of landscape change.}, } @article {pmid39827314, year = {2025}, author = {Soman, VK and Natarajan, V}, title = {Crayfish optimization based pixel selection using block scrambling based encryption for secure cloud computing environment.}, journal = {Scientific reports}, volume = {15}, number = {1}, pages = {2406}, pmid = {39827314}, issn = {2045-2322}, abstract = {Cloud Computing (CC) is a fast-emerging field that enables consumers to access network resources on-demand. However, ensuring a high level of security in CC environments remains a significant challenge. Traditional encryption algorithms are often inadequate in protecting confidential data, especially digital images, from complex cyberattacks. The increasing reliance on cloud storage and transmission of digital images has made it essential to develop strong security measures to stop unauthorized access and guarantee the integrity of sensitive information. This paper presents a novel Crayfish Optimization based Pixel Selection using Block Scrambling Based Encryption Approach (CFOPS-BSBEA) technique that offers a unique solution to improve security in cloud environments. By integrating steganography and encryption, the CFOPS-BSBEA technique provides a robust approach to secure digital images. Our key contribution lies in the development of a three-stage process that optimally selects pixels for steganography, encodes secret images using Block Scrambling Based Encryption, and embeds them in cover images. The CFOPS-BSBEA technique leverages the strengths of both steganography and encryption to provide a secure and effective approach to digital image protection.
The Crayfish Optimization algorithm is used to select the most suitable pixels for steganography, ensuring that the secret image is embedded in a way that minimizes the risk of detection. The Block Scrambling Based Encryption algorithm is then used to encode the secret image, providing an additional layer of security. Experimental results show that the CFOPS-BSBEA technique outperforms existing models in terms of security performance. The proposed approach has significant implications for the secure storage and transmission of digital images in cloud environments, and its originality and novelty make it an attractive contribution to the field. Furthermore, the CFOPS-BSBEA technique has the potential to inspire further research in secure cloud computing environments, paving the way for the development of more robust and efficient security measures.}, } @article {pmid39823316, year = {2025}, author = {Kari Balakrishnan, A and Chellaperumal, A and Lakshmanan, S and Vijayakumar, S}, title = {A novel efficient data storage and data auditing in cloud environment using enhanced child drawing development optimization strategy.}, journal = {Network (Bristol, England)}, volume = {}, number = {}, pages = {1-41}, doi = {10.1080/0954898X.2024.2443622}, pmid = {39823316}, issn = {1361-6536}, abstract = {Optimization of the cloud-based data structures is carried out using the Adaptive Level and Skill Rate-based Child Drawing Development Optimization (ALSR-CDDO) algorithm. The overall cost of computing and communication is also reduced by optimally selecting these data structures via the ALSR-CDDO algorithm. The storage of the data in the cloud platform is performed using the Divide and Conquer Table (D&CT). The location table and the information table are generated using the D&CT method. Details such as the file information, file ID, version number, and user ID are all present in the information table. Every time data is deleted or updated, its version number is modified. Whenever an update takes place using D&CT, the location table also gets updated. The information regarding the location of a file in the Cloud Service Provider (CSP) is given in the location table. Once the data is stored in the CSP, the auditing of the data is then performed on the stored data. Both dynamic and batch auditing are carried out on the stored data, even if it gets updated dynamically in the CSP. The security offered by the proposed scheme is verified by comparing it with other existing auditing schemes.}, } @article {pmid39814842, year = {2025}, author = {Yan, K and Yu, X and Liu, J and Wang, J and Chen, X and Pu, J and Weiss, M and Myneni, RB}, title = {HiQ-FPAR: A High-Quality and Value-added MODIS Global FPAR Product from 2000 to 2023.}, journal = {Scientific data}, volume = {12}, number = {1}, pages = {72}, pmid = {39814842}, issn = {2052-4463}, support = {42271356//National Natural Science Foundation of China (National Science Foundation of China)/ ; }, abstract = {The Fraction of Absorbed Photosynthetically Active Radiation (FPAR) is essential for assessing vegetation's photosynthetic efficiency and ecosystem energy balance. While the MODIS FPAR product provides valuable global data, its reliability is compromised by noise, particularly under poor observation conditions like cloud cover.
To solve this problem, we developed the Spatio-Temporal Information Composition Algorithm (STICA), which enhances MODIS FPAR by integrating quality control, spatio-temporal correlations, and original FPAR values, resulting in the High-Quality FPAR (HiQ-FPAR) product. HiQ-FPAR shows superior accuracy compared to MODIS FPAR and Sensor-Independent FPAR (SI-FPAR), with RMSE values of 0.130, 0.154, and 0.146, respectively, and R[2] values of 0.722, 0.630, and 0.717. Additionally, HiQ-FPAR exhibits smoother time series in 52.1% of global areas, compared to 44.2% for MODIS. Available on Google Earth Engine and Zenodo, the HiQ-FPAR dataset offers 500 m and 5 km resolution at an 8-day interval from 2000 to 2023, supporting a wide range of FPAR applications.}, } @article {pmid39802502, year = {2025}, author = {Rushton, CE and Tate, JE and Sjödin, Å}, title = {A modern, flexible cloud-based database and computing service for real-time analysis of vehicle emissions data.}, journal = {Urban informatics}, volume = {4}, number = {1}, pages = {1}, pmid = {39802502}, issn = {2731-6963}, abstract = {In response to the demand for advanced tools in environmental monitoring and policy formulation, this work leverages modern software and big data technologies to enhance novel road transport emissions research. This is achieved by making data and analysis tools more widely available and customisable so users can tailor outputs to their requirements. Through the novel combination of vehicle emissions remote sensing and cloud computing methodologies, these developments aim to reduce the barriers to understanding real-driving emissions (RDE) across urban environments. The platform demonstrates the practical application of modern cloud-computing resources in overcoming the complex demands of air quality management and policy monitoring. This paper shows the potential of modern technological solutions to improve the accessibility of environmental data for policy-making and the broader pursuit of sustainable urban development. The web-application is publicly and freely available at https://cares-public-app.azurewebsites.net.}, } @article {pmid39796896, year = {2024}, author = {Ahmed, AA and Farhan, K and Ninggal, MIH and Alselwi, G}, title = {Retrieving and Identifying Remnants of Artefacts on Local Devices Using Sync.com Cloud.}, journal = {Sensors (Basel, Switzerland)}, volume = {25}, number = {1}, pages = {}, pmid = {39796896}, issn = {1424-8220}, abstract = {Most current research in cloud forensics is focused on tackling the challenges encountered by forensic investigators in identifying and recovering artifacts from cloud devices. These challenges arise from the diverse array of cloud service providers as each has its distinct rules, guidelines, and requirements. This research proposes an investigation technique for identifying and locating data remnants in two main stages: artefact collection and evidence identification. In the artefact collection stage, the proposed technique determines the location of the artefacts in cloud storage and collects them for further investigation in the next stage. In the evidence identification stage, the collected artefacts are investigated to identify the evidence relevant to the cybercrime currently being investigated. These two stages form an integrated process that mitigates the difficulty of locating the artefacts and reduces the time needed to identify the relevant evidence.
The proposed technique is implemented and tested by applying a forensic investigation algorithm on Sync.com cloud storage using the Microsoft Windows 10 operating system.}, } @article {pmid39793205, year = {2025}, author = {Hoyer, I and Utz, A and Hoog Antink, C and Seidl, K}, title = {tinyHLS: a novel open source high level synthesis tool targeting hardware accelerators for artificial neural network inference.}, journal = {Physiological measurement}, volume = {13}, number = {1}, pages = {}, doi = {10.1088/1361-6579/ada8f0}, pmid = {39793205}, issn = {1361-6579}, mesh = {*Neural Networks, Computer ; Electrocardiography ; Humans ; *Software ; Signal Processing, Computer-Assisted ; *Computers ; Atrial Fibrillation/diagnosis ; }, abstract = {Objective.In recent years, wearable devices such as smartwatches and smart patches have revolutionized biosignal acquisition and analysis, particularly for monitoring electrocardiography (ECG). However, the limited power supply of these devices often precludes real-time data analysis on the patch itself.Approach.This paper introduces a novel Python package, tinyHLS (High Level Synthesis), designed to address these challenges by converting Python-based AI models into platform-independent hardware description language code accelerators. Specifically designed for convolutional neural networks, tinyHLS seamlessly integrates into the AI developer's workflow in Python TensorFlow Keras. Our methodology leverages a template-based hardware compiler that ensures flexibility, efficiency, and ease of use. With this work, tinyHLS is published for the first time, featuring templates for several neural network layers, such as dense, convolution, max and global average pooling. In the first version, the rectified linear unit (ReLU) is supported as the activation function. It targets one-dimensional data, with a particular focus on time series data.Main results.The generated accelerators are validated in detecting atrial fibrillation on ECG data, demonstrating significant improvements in processing speed (62-fold) and energy efficiency (4.5-fold). Quality of code and synthesizability are ensured by validating the outputs with commercial ASIC design tools.Significance.Importantly, tinyHLS is open-source and does not rely on commercial tools, making it a versatile solution for both academic and commercial applications. The paper also discusses the integration with an open-source RISC-V and the potential for future enhancements of tinyHLS, including its application in edge servers and cloud computing.
The source code is available on GitHub: https://github.com/Fraunhofer-IMS/tinyHLS.}, } @article {pmid39792877, year = {2025}, author = {Scales, C and Bai, J and Murakami, D and Young, J and Cheng, D and Gupta, P and Claypool, C and Holland, E and Kading, D and Hauser, W and O'Dell, L and Osae, E and Blackie, CA}, title = {Internal validation of a convolutional neural network pipeline for assessing meibomian gland structure from meibography.}, journal = {Optometry and vision science : official publication of the American Academy of Optometry}, volume = {102}, number = {1}, pages = {28-36}, pmid = {39792877}, issn = {1538-9235}, mesh = {Humans ; *Meibomian Glands/diagnostic imaging ; *Neural Networks, Computer ; Algorithms ; *Diagnostic Techniques, Ophthalmological ; *Meibomian Gland Dysfunction/diagnosis ; Reproducibility of Results ; Convolutional Neural Networks ; }, abstract = {SIGNIFICANCE: Optimal meibography utilization and interpretation are hindered by poor lid presentation, blurry images, or image artifacts, and by the challenges of applying clinical grading scales. These results, using the largest image dataset analyzed to date, demonstrate the development of algorithms that provide standardized, real-time inference that addresses all of these limitations.

PURPOSE: This study aimed to develop and validate an algorithmic pipeline to automate and standardize meibomian gland absence assessment and interpretation.

METHODS: A total of 143,476 images were collected from sites across North America. Ophthalmologist and optometrist experts established ground-truth image quality and quantification (i.e., degree of gland absence). Annotated images were allocated into training, validation, and test sets. Convolutional neural networks within Google Cloud Vertex AI trained three locally deployable or edge-based predictive models: image quality detection, over-flip detection, and gland absence detection. The algorithms were combined into an algorithmic pipeline onboard a LipiScan Dynamic Meibomian Imager to provide real-time clinical inference for new images. Performance metrics were generated for each algorithm in the pipeline onboard the LipiScan from naive image test sets.
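The three models described here act as a gated pipeline: an image is graded for gland absence only after passing the quality and over-flip checks. A schematic sketch, with hypothetical predict_* callables standing in for the trained Vertex AI models:

```python
def grade_meibography(image, predict_quality, predict_overflip, predict_absence):
    """Gated pipeline: quality gate -> over-flip gate -> gland-absence grade.
    The predict_* callables are hypothetical stand-ins for the models."""
    if predict_quality(image) == "poor":
        return {"status": "retake", "reason": "image quality"}
    if predict_overflip(image) == "over-flipped":
        return {"status": "retake", "reason": "lid over-flip"}
    return {"status": "graded", "gland_absence": predict_absence(image)}


result = grade_meibography(
    image="img-001",
    predict_quality=lambda img: "good",
    predict_overflip=lambda img: "ok",
    predict_absence=lambda img: "grade 2",
)
print(result)  # {'status': 'graded', 'gland_absence': 'grade 2'}
```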

RESULTS: Individual model performance metrics included the following: weighted average precision (image quality detection: 0.81, over-flip detection: 0.88, gland absence detection: 0.84), weighted average recall (image quality detection: 0.80, over-flip detection: 0.87, gland absence detection: 0.80), weighted average F1 score (image quality detection: 0.80, over-flip detection: 0.87, gland absence detection: 0.81), overall accuracy (image quality detection: 0.80, over-flip detection: 0.87, gland absence detection: 0.80), Cohen κ (image quality detection: 0.60, over-flip detection: 0.62, and gland absence detection: 0.71), Kendall τb (image quality detection: 0.61, p<0.001, over-flip detection: 0.63, p<0.001, and gland absence detection: 0.67, p<0.001), and Matthews coefficient (image quality detection: 0.61, over-flip detection: 0.63, and gland absence detection: 0.62). Area under the precision-recall curve (image quality detection: 0.87, over-flip detection: 0.92, gland absence detection: 0.89) and area under the receiver operating characteristic curve (image quality detection: 0.88, over-flip detection: 0.91, gland absence detection: 0.93) were calculated across a common set of thresholds, ranging from 0 to 1.
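Every metric in this list is standard and reproducible from per-image labels and predictions with scikit-learn and SciPy; a sketch on toy labels (not the study's data):

```python
from scipy.stats import kendalltau
from sklearn.metrics import (accuracy_score, cohen_kappa_score,
                             matthews_corrcoef,
                             precision_recall_fscore_support)

y_true = [0, 1, 2, 2, 1, 0, 2, 1]  # toy ordinal gland-absence grades
y_pred = [0, 1, 2, 1, 1, 0, 2, 2]

p, r, f1, _ = precision_recall_fscore_support(y_true, y_pred, average="weighted")
print(f"weighted P/R/F1: {p:.2f}/{r:.2f}/{f1:.2f}")
print("accuracy:", accuracy_score(y_true, y_pred))
print("Cohen kappa:", round(cohen_kappa_score(y_true, y_pred), 2))
print("Matthews coefficient:", round(matthews_corrcoef(y_true, y_pred), 2))
tau, pval = kendalltau(y_true, y_pred)  # kendalltau computes tau-b by default
print(f"Kendall tau-b: {tau:.2f} (p={pval:.3f})")
```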

CONCLUSIONS: Comparison of predictions from each model to expert panel ground-truth demonstrated strong association and moderate to substantial agreement. The findings and performance metrics show that the pipeline of algorithms provides standardized, real-time inference/prediction of meibomian gland absence.}, } @article {pmid39792820, year = {2025}, author = {Lu, C and Zhou, J and Zou, Q}, title = {An optimized approach for container deployment driven by a two-stage load balancing mechanism.}, journal = {PloS one}, volume = {20}, number = {1}, pages = {e0317039}, pmid = {39792820}, issn = {1932-6203}, mesh = {*Algorithms ; *Cloud Computing ; }, abstract = {Lightweight container technology has emerged as a fundamental component of cloud-native computing, with the deployment of containers and the balancing of loads on virtual machines representing significant challenges. This paper presents an optimization strategy for container deployment that consists of two stages: coarse-grained and fine-grained load balancing. In the initial stage, a greedy algorithm is employed for coarse-grained deployment, facilitating the distribution of container services across virtual machines in a balanced manner based on resource requests. The subsequent stage utilizes a genetic algorithm for fine-grained resource allocation, ensuring an equitable distribution of resources to each container service on a single virtual machine. This two-stage optimization enhances load balancing and resource utilization throughout the system. Empirical results indicate that this approach is more efficient and adaptable in comparison to the Grey Wolf Optimization (GWO) Algorithm, the Simulated Annealing (SA) Algorithm, and the GWO-SA Algorithm, significantly improving both resource utilization and load balancing performance on virtual machines.}, } @article {pmid39788517, year = {2024}, author = {Kuang, Y and Cao, D and Jiang, D and Zuo, Y and Lu, F and Yuan, J and Fang, Z and Zou, Y and Wang, H and Wu, C and Pei, Q and Yang, G}, title = {CPhaMAS: The first pharmacokinetic analysis cloud platform developed by China.}, journal = {Zhong nan da xue xue bao. Yi xue ban = Journal of Central South University. Medical sciences}, volume = {49}, number = {8}, pages = {1290-1300}, pmid = {39788517}, issn = {1672-7347}, mesh = {*Cloud Computing ; China ; *Software ; Humans ; Pharmacokinetics ; Pharmacology, Clinical/education/methods ; Reproducibility of Results ; Computer Security ; User-Computer Interface ; }, abstract = {OBJECTIVES: Software for pharmacological modeling and statistical analysis is essential for drug development and individualized treatment modeling. This study aims to develop a pharmacokinetic analysis cloud platform that leverages cloud-based benefits, offering a user-friendly interface with a smoother learning curve.
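The coarse-grained stage of the two-stage container strategy above, spreading containers across virtual machines in proportion to their resource requests, admits a compact greedy sketch (single-resource toy model; the paper's fine-grained genetic-algorithm stage is omitted):

```python
import heapq

def greedy_place(container_requests, num_vms):
    """Coarse-grained stage: assign each container (largest request first)
    to the currently least-loaded VM. Single-resource toy model."""
    heap = [(0.0, vm) for vm in range(num_vms)]  # (load, vm_id)
    heapq.heapify(heap)
    placement = {}
    for cid, req in sorted(container_requests.items(), key=lambda kv: -kv[1]):
        load, vm = heapq.heappop(heap)
        placement[cid] = vm
        heapq.heappush(heap, (load + req, vm))
    return placement

print(greedy_place({"web": 0.5, "db": 0.8, "cache": 0.2, "mq": 0.4}, 2))
```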

METHODS: The platform was built using Rails as the framework, developed in the Julia language, and employs a PostgreSQL 14 database, a Redis cache, and Sidekiq for asynchronous task management. Four commonly used modules in clinical pharmacology research were developed: non-compartmental analysis, bioequivalence/bioavailability analysis, compartment model analysis, and population pharmacokinetics modeling. The platform ensured comprehensive data security and traceability through multiple safeguards, including data encryption, access control, transmission encryption, redundant backups, and log management. The platform underwent basic function, performance, reliability, usability, and scalability testing, along with practical case studies.

RESULTS: The CPhaMAS cloud platform successfully implemented the four module functionalities. The platform provides list-based navigation for users, featuring checkbox-style interactions. Through cloud computing, it allows direct online data analysis, saving computer storage and minimizing performance requirements. Modeling and visualization do not require programming knowledge. Basic functionality achieved 100% completion, with an average annual uptime of over 99%. Server response time was between 200 and 500 ms, and average CPU usage was maintained below 30%. In a practical case study, cefotaxime sodium/tazobactam sodium injection (6:1 ratio) displayed near-linear pharmacokinetics within a dose range of 1.0 to 4.0 g, with no significant effect of tazobactam on the pharmacokinetic parameters of cefotaxime, validating the platform's usability and reliability.
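Of CPhaMAS's four modules, non-compartmental analysis is the easiest to illustrate: the exposure metric AUC is the trapezoidal rule applied to the concentration-time curve. A minimal sketch on toy data (CPhaMAS itself is written in Julia; this is not its code):

```python
import numpy as np

def auc_0_tlast(t, c):
    """AUC(0-tlast) by the linear trapezoidal rule, the core quantity of
    non-compartmental analysis (NCA)."""
    t, c = np.asarray(t, float), np.asarray(c, float)
    return float(np.sum((c[1:] + c[:-1]) / 2.0 * np.diff(t)))

times = [0, 0.5, 1, 2, 4, 8, 12]             # sampling times (h)
conc = [0, 12.1, 18.4, 15.2, 9.8, 4.1, 1.7]  # toy concentrations (mg/L)
print(f"AUC(0-12 h) = {auc_0_tlast(times, conc):.1f} mg*h/L")
```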

CONCLUSIONS: CPhaMAS provides an integrated modeling and statistical tool for educators, researchers, and industrial professionals, enabling non-compartmental analysis, bioequivalence/bioavailability analysis, compartmental model building, and population pharmacokinetic modeling and simulation.}, } @article {pmid39787662, year = {2025}, author = {Peng, W and Hong, Y and Chen, Y and Yi, Z}, title = {AIScholar: An OpenFaaS-enhanced cloud platform for intelligent medical data analytics.}, journal = {Computers in biology and medicine}, volume = {186}, number = {}, pages = {109648}, doi = {10.1016/j.compbiomed.2024.109648}, pmid = {39787662}, issn = {1879-0534}, mesh = {Humans ; *Cloud Computing ; *Artificial Intelligence ; Breast Neoplasms/diagnosis ; Female ; *Software ; Data Analytics ; }, abstract = {This paper presents AIScholar, an intelligent research cloud platform developed based on artificial intelligence analysis methods and the OpenFaaS serverless framework, designed for intelligent analysis of clinical medical data with high scalability. AIScholar simplifies the complex analysis process by encapsulating a wide range of medical data analytics methods into a series of customizable cloud tools that emphasize ease of use and expandability, within OpenFaaS's serverless computing framework. As a multifaceted auxiliary tool in medical scientific exploration, AIScholar accelerates the deployment of computational resources, enabling clinicians and scientific personnel to derive new insights from clinical medical data with unprecedented efficiency. A case study focusing on breast cancer clinical data underscores the practicality that AIScholar offers to clinicians for diagnosis and decision-making. Insights generated by the platform have a direct impact on the physicians' ability to identify and address clinical issues, underscoring its significance for real-world clinical practice. Consequently, AIScholar makes a meaningful impact on medical research and clinical practice by providing powerful analytical tools to clinicians and scientific personnel, thereby promoting significant advancements in the analysis of clinical medical data.}, } @article {pmid39776261, year = {2025}, author = {Nolasco, M and Balzarini, M}, title = {Assessment of temporal aggregation of Sentinel-2 images on seasonal land cover mapping and its impact on landscape metrics.}, journal = {Environmental monitoring and assessment}, volume = {197}, number = {2}, pages = {142}, pmid = {39776261}, issn = {1573-2959}, mesh = {*Environmental Monitoring/methods ; *Ecosystem ; *Seasons ; Satellite Imagery ; Geographic Information Systems ; }, abstract = {Landscape metrics (LM) play a crucial role in fields such as urban planning, ecology, and environmental research, providing insights into the ecological and functional dynamics of ecosystems. However, in dynamic systems, generating thematic maps for LM analysis poses challenges due to the substantial data volume required and issues such as cloud cover interruptions. The aim of this study was to compare the accuracy of land cover maps produced by three temporal aggregation methods: median reflectance, maximum normalised difference vegetation index (NDVI), and a two-date image stack using Sentinel-2 (S2), and then to analyse their implications for LM calculation. The Google Earth Engine platform facilitated data filtering, image selection, and aggregation.
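The median-reflectance and max-NDVI aggregations compared in this study are short expressions in the Earth Engine Python API; a sketch assuming the COPERNICUS/S2_SR collection and an illustrative area of interest (not the authors' script):

```python
import ee

ee.Initialize()  # assumes prior Earth Engine authentication

aoi = ee.Geometry.Point([-64.2, -31.4]).buffer(5000)  # illustrative AOI
s2 = (ee.ImageCollection("COPERNICUS/S2_SR")
      .filterBounds(aoi)
      .filterDate("2021-09-01", "2022-03-31")
      .filter(ee.Filter.lt("CLOUDY_PIXEL_PERCENTAGE", 20)))

# Method 1: per-pixel median reflectance composite.
median_composite = s2.median()

# Method 2: max-NDVI composite, keeping per pixel the scene with highest NDVI.
def add_ndvi(img):
    return img.addBands(img.normalizedDifference(["B8", "B4"]).rename("NDVI"))

max_ndvi_composite = s2.map(add_ndvi).qualityMosaic("NDVI")
```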
A random forest algorithm was employed to classify five land cover classes across ten sites, with classification accuracy assessed using global measurements and the Kappa index. LM were then quantified. The analysis revealed that S2 data provided a high-quality, cloud-free dataset suitable for analysis, ensuring a minimum of 25 cloud-free pixels over the study period. The two-date and median methods exhibited superior land cover classification accuracy compared to the max NDVI method. In particular, the two-date method resulted in lower fragmentation-heterogeneity and complexity metrics in the resulting maps compared to the median and max NDVI methods. Nevertheless, the median method holds promise for integration into operational land cover mapping programmes, particularly for larger study areas exceeding the width of S2 swath coverage. We find patch density combined with conditional entropy to be particularly useful metrics for assessing fragmentation and configuration complexity.}, } @article {pmid39774953, year = {2025}, author = {Saeed, A and A Khan, M and Akram, U and J Obidallah, W and Jawed, S and Ahmad, A}, title = {Deep learning based approaches for intelligent industrial machinery health management and fault diagnosis in resource-constrained environments.}, journal = {Scientific reports}, volume = {15}, number = {1}, pages = {1114}, pmid = {39774953}, issn = {2045-2322}, mesh = {*Deep Learning ; *Industry ; Humans ; Artificial Intelligence ; Internet of Things ; }, abstract = {Industry 4.0 represents the fourth industrial revolution, which is characterized by the incorporation of digital technologies, the Internet of Things (IoT), artificial intelligence, big data, and other advanced technologies into industrial processes. Industrial Machinery Health Management (IMHM) is a crucial element, based on the Industrial Internet of Things (IIoT), which focuses on monitoring the health and condition of industrial machinery. The academic community has focused on various aspects of IMHM, such as prognostic maintenance, condition monitoring, estimation of remaining useful life (RUL), intelligent fault diagnosis (IFD), and architectures based on edge computing. Each of these categories holds its own significance in the context of industrial processes. In this survey, we specifically examine the research on RUL prediction, edge-based architectures, and intelligent fault diagnosis, with a primary focus on the domain of intelligent fault diagnosis. The importance of IFD methods in ensuring the smooth execution of industrial processes has become increasingly evident. However, most methods are formulated under the assumption of complete, balanced, and abundant data, which often does not align with real-world engineering scenarios. The difficulties linked to these classifications of IMHM have received noteworthy attention from the research community, leading to a substantial number of published papers on the topic. While there are existing comprehensive reviews that address major challenges and limitations in this field, there is still a gap in thoroughly investigating research perspectives across RUL prediction, edge-based architectures, and complete intelligent fault diagnosis processes. To fill this gap, we undertake a comprehensive survey that reviews and discusses research achievements in this domain, specifically focusing on IFD. 
Initially, we classify the existing IFD methods into three distinct perspectives: the method of processing data, which aims to optimize inputs for the intelligent fault diagnosis model and mitigate limitations in the training sample set; the method of constructing the model, which involves designing the structure and features of the model to enhance its resilience to challenges; and the method of optimizing training, which focuses on refining the training process for intelligent fault diagnosis models and emphasizes the importance of ideal data in the training process. Subsequently, the survey covers techniques related to RUL prediction and edge-cloud architectures for resource-constrained environments. Finally, this survey consolidates the outlook on relevant issues in IMHM, explores potential solutions, and offers practical recommendations for further consideration.}, } @article {pmid39772309, year = {2024}, author = {Ibrahem, UM and Alblaihed, MA and Altamimi, AB and Alqirnas, HR and Mahmoud, SM and Salem, MI and Alsaadany, M}, title = {Cloud computing practice activities and mental capacity on developing reproductive health and cognitive absorption.}, journal = {African journal of reproductive health}, volume = {28}, number = {12}, pages = {186-200}, doi = {10.29063/ajrh2024/v28i12.19}, pmid = {39772309}, issn = {1118-4841}, mesh = {Humans ; *Reproductive Health ; Female ; *Cognition ; *Cloud Computing ; Male ; Young Adult ; Students/psychology ; Adult ; Mental Competency ; }, abstract = {The current study aims to determine how the interactions between practice (distributed/focused) and mental capacity (high/low) in the cloud-computing environment (CCE) affect the development of reproductive health skills and cognitive absorption. The study employed an experimental design, and it included a categorical variable for mental capacity (low/high) and an independent variable with two types of activities (distributed/focused). The research sample consisted of 240 students from the College of Science and College of Applied Medical Sciences at the University of Hail. The sample was divided into four experimental groups. The study's most significant finding was that the CCE apparently favored the group that studied using the focused practice style with high mental capacity on the reproductive health skills test, as opposed to the distributed practice style with low mental capacity on cognitive absorption. The findings will add to the ongoing debate over which of the two distributed/focused practice activity models is more effective in achieving desired educational results.}, } @article {pmid39771929, year = {2024}, author = {Nur, A and Demise, A and Muanenda, Y}, title = {Design and Evaluation of a Cloud Computing System for Real-Time Measurements in Polarization-Independent Long-Range DAS Based on Coherent Detection.}, journal = {Sensors (Basel, Switzerland)}, volume = {24}, number = {24}, pages = {}, pmid = {39771929}, issn = {1424-8220}, support = {G.A.: n. 2022-1-IT02-KA171-HED-000069979//Erasmus+ Student/Staff Mobility Exchange 540 Program/ ; }, abstract = {CloudSim is a versatile simulation framework for modeling cloud infrastructure components that supports customizable and extensible application provisioning strategies, allowing for the simulation of cloud services. On the other hand, Distributed Acoustic Sensing (DAS) is a ubiquitous technique used for measuring vibrations over an extended region.
Data handling in DAS remains an open issue, as many applications need continuous monitoring of a volume of samples whose storage and processing in real time require high-capacity memory and computing resources. We employ the CloudSim tool to design and evaluate a cloud computing scheme for long-range, polarization-independent DAS using coherent detection of Rayleigh backscattering signals and uncover valuable insights on the evolution of the processing times for a diverse range of Virtual Machine (VM) capacities as well as sizes of blocks of processed data. Our analysis demonstrates that the choice of VM significantly impacts computational times in real-time measurements in long-range DAS and that achieving polarization independence introduces minimal processing overheads in the system. Additionally, the increase in the block size of processed samples per cycle results in diminishing increments in overall processing times per batch of new samples added, demonstrating the scalability of cloud computing schemes in long-range DAS and its capability to manage larger datasets efficiently.}, } @article {pmid39771862, year = {2024}, author = {Khabti, J and AlAhmadi, S and Soudani, A}, title = {Enhancing Deep-Learning Classification for Remote Motor Imagery Rehabilitation Using Multi-Subject Transfer Learning in IoT Environment.}, journal = {Sensors (Basel, Switzerland)}, volume = {24}, number = {24}, pages = {}, pmid = {39771862}, issn = {1424-8220}, mesh = {Humans ; *Brain-Computer Interfaces ; *Deep Learning ; *Electroencephalography/methods ; Internet of Things ; Signal Processing, Computer-Assisted ; }, abstract = {One of the most promising applications for electroencephalogram (EEG)-based brain-computer interfaces (BCIs) is motor rehabilitation through motor imagery (MI) tasks. However, current MI training requires physical attendance, while remote MI training can be applied anywhere, facilitating flexible rehabilitation. Providing remote MI training raises challenges to ensuring an accurate recognition of MI tasks by healthcare providers, in addition to managing computation and communication costs. The MI tasks are recognized through EEG signal processing and classification, which can drain sensor energy due to the complexity of the data and the presence of redundant information, often influenced by subject-dependent factors. To address these challenges, we propose in this paper a multi-subject transfer-learning approach for an efficient MI training framework in remote rehabilitation within an IoT environment. For efficient implementation, we propose an IoT architecture that includes cloud/edge computing as a solution to enhance the system's efficiency and reduce the use of network resources. Furthermore, deep-learning classification with and without channel selection is applied in the cloud, while multi-subject transfer-learning classification is utilized at the edge node. Various transfer-learning strategies, including different epochs, freezing layers, and data divisions, were employed to improve accuracy and efficiency. To validate this framework, we used the BCI IV 2a dataset, focusing on subjects 7, 8, and 9 as targets. The results demonstrated that our approach significantly enhanced the average accuracy in both multi-subject and single-subject transfer-learning classification. In three-subject transfer-learning classification, the FCNNA model achieved up to 79.77% accuracy without channel selection and 76.90% with channel selection. 
For two-subject and single-subject transfer learning, the application of transfer learning improved the average accuracy by up to 6.55% and 12.19%, respectively, compared to classification without transfer learning. This framework offers a promising solution for remote MI rehabilitation, providing both accurate task recognition and efficient resource usage.}, } @article {pmid39771837, year = {2024}, author = {Barthelemy, J and Iqbal, U and Qian, Y and Amirghasemi, M and Perez, P}, title = {Safety After Dark: A Privacy Compliant and Real-Time Edge Computing Intelligent Video Analytics for Safer Public Transportation.}, journal = {Sensors (Basel, Switzerland)}, volume = {24}, number = {24}, pages = {}, pmid = {39771837}, issn = {1424-8220}, mesh = {Humans ; *Video Recording ; *Artificial Intelligence ; Transportation/methods ; Safety ; Privacy ; Deep Learning ; }, abstract = {Public transportation systems play a vital role in modern cities, but they face growing security challenges, particularly related to incidents of violence. Detecting and responding to violence in real time is crucial for ensuring passenger safety and the smooth operation of these transport networks. To address this issue, we propose an advanced artificial intelligence (AI) solution for identifying unsafe behaviours in public transport. The proposed approach employs deep learning action recognition models and utilises technologies like the NVIDIA DeepStream SDK, Amazon Web Services (AWS) DirectConnect, a local edge computing server, ONNXRuntime and MQTT to accelerate the end-to-end pipeline. The solution captures video streams from remote train stations' closed-circuit television (CCTV) networks, processes the data in the cloud, applies the action recognition model, and transmits the results to a live web application. A temporal pyramid network (TPN) action recognition model was trained on a newly curated video dataset mixing open-source resources and live simulated trials to identify the unsafe behaviours. The base model was able to achieve a validation accuracy of 93% when trained using open-source dataset samples, which improved to 97% when the live simulated dataset was included during training. The developed AI system was deployed at Wollongong Train Station (NSW, Australia) and showcased impressive accuracy in detecting violence incidents during an 8-week test period, achieving a reliable false-positive (FP) rate of 23%. While the AI correctly identified 30 true-positive incidents, there were 6 cases of false negatives (FNs) where violence incidents were missed during rainy weather, suggesting that the training dataset needs more samples related to bad weather. The AI model's continuous retraining capability ensures its adaptability to various real-world scenarios, making it a valuable tool for enhancing safety and the overall passenger experience in public transport settings.}, } @article {pmid39771772, year = {2024}, author = {Li, L and Zhu, L and Li, W}, title = {Cloud-Edge-End Collaborative Federated Learning: Enhancing Model Accuracy and Privacy in Non-IID Environments.}, journal = {Sensors (Basel, Switzerland)}, volume = {24}, number = {24}, pages = {}, pmid = {39771772}, issn = {1424-8220}, support = {62371098//the National Natural Science Foundation of China/ ; }, abstract = {Cloud-edge-end computing architecture is crucial for large-scale edge data processing and analysis.
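Returning to the motor-imagery study above: the freezing-layer strategy it describes follows the usual Keras transfer-learning pattern. A schematic sketch; the layer split, head, and optimizer settings are assumptions for illustration, not the paper's configuration:

```python
import tensorflow as tf

def make_target_model(base_model, n_classes, freeze_upto):
    """Multi-subject transfer learning sketch: reuse a network trained on
    source subjects, freeze the early feature-extraction layers, and
    fine-tune the rest on the target subject."""
    for layer in base_model.layers[:freeze_upto]:
        layer.trainable = False
    x = base_model.layers[-2].output  # reuse penultimate features
    out = tf.keras.layers.Dense(n_classes, activation="softmax")(x)
    model = tf.keras.Model(base_model.input, out)
    model.compile(optimizer=tf.keras.optimizers.Adam(1e-4),
                  loss="sparse_categorical_crossentropy",
                  metrics=["accuracy"])
    return model
```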
However, the diversity of terminal nodes and task complexity in this architecture often result in non-independent and identically distributed (non-IID) data, making it challenging to balance data heterogeneity and privacy protection. To address this, we propose a privacy-preserving federated learning method based on cloud-edge-end collaboration. Our method fully considers the three-tier architecture of cloud-edge-end systems and the non-IID nature of terminal node data. It enhances model accuracy while protecting the privacy of terminal node data. The proposed method groups terminal nodes based on the similarity of their data distributions and constructs edge subnetworks for training in collaboration with edge nodes, thereby mitigating the negative impact of non-IID data. Furthermore, we enhance WGAN-GP with an attention mechanism to generate balanced synthetic data while preserving key patterns from original datasets, reducing the adverse effects of non-IID data on global model accuracy while preserving data privacy. In addition, we introduce data resampling and loss function weighting strategies to mitigate model bias caused by imbalanced data distribution. Experimental results on real-world datasets demonstrate that our proposed method significantly outperforms existing approaches in terms of model accuracy, F1-score, and other metrics.}, } @article {pmid39771702, year = {2024}, author = {Cruz Castañeda, WA and Bertemes Filho, P}, title = {Improvement of an Edge-IoT Architecture Driven by Artificial Intelligence for Smart-Health Chronic Disease Management.}, journal = {Sensors (Basel, Switzerland)}, volume = {24}, number = {24}, pages = {}, pmid = {39771702}, issn = {1424-8220}, support = {Coordenação de Aperfeiçoamento de Pessoal de Nível Superior-Brasil (CAPES)//Coordenação de Aperfeiçoamento de Pessoal de Nível Superior-Brasil (CAPES)/ ; }, mesh = {*Artificial Intelligence ; Humans ; *Algorithms ; *Internet of Things ; Chronic Disease ; Wearable Electronic Devices ; Disease Management ; }, abstract = {One of the health challenges in the 21st century is to rethink approaches to non-communicable disease prevention. A solution is a smart city that implements technology to make health smarter, enables healthcare access, and contributes to all residents' overall well-being. Thus, this paper proposes an architecture to deliver smart health. The architecture is anchored in the Internet of Things and edge computing, and it is driven by artificial intelligence to establish three foundational layers in smart care. Experimental results in a case study on noninvasive glucose prediction show that the architecture senses and acquires data that capture relevant characteristics. The study also establishes a baseline of twelve regression algorithms to assess the non-invasive glucose prediction performance regarding the mean squared error, root mean squared error, and r-squared score, and the CatBoost regressor outperforms the other models with 218.91 and 782.30 in MSE, 14.80 and 27.97 in RMSE, and 0.81 and 0.31 in R2, respectively, on training and test sets.
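A regression baseline of the kind just described for glucose prediction, a gradient-boosted model scored by MSE, RMSE, and R2 on train and test splits, takes a few lines with CatBoost and scikit-learn (synthetic features in place of the sensor data):

```python
import numpy as np
from catboost import CatBoostRegressor
from sklearn.metrics import mean_squared_error, r2_score
from sklearn.model_selection import train_test_split

rng = np.random.default_rng(0)
X = rng.normal(size=(500, 8))  # synthetic stand-in for sensor features
y = X[:, 0] * 30 + X[:, 1] ** 2 * 10 + rng.normal(scale=5, size=500)

X_tr, X_te, y_tr, y_te = train_test_split(X, y, test_size=0.2, random_state=0)
model = CatBoostRegressor(iterations=300, depth=4, verbose=False)
model.fit(X_tr, y_tr)

for name, Xs, ys in [("train", X_tr, y_tr), ("test", X_te, y_te)]:
    pred = model.predict(Xs)
    mse = mean_squared_error(ys, pred)
    print(f"{name}: MSE={mse:.1f} RMSE={mse**0.5:.1f} R2={r2_score(ys, pred):.2f}")
```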
Future research involves extending the evaluation of the algorithms to new datasets, creating and optimizing embedded AI models, deploying edge-IoT with embedded AI for wearable devices, implementing an autonomous AI cloud engine, and implementing federated learning to deliver scalable smart health in a smart city context.}, } @article {pmid39766661, year = {2024}, author = {Podgorelec, D and Strnad, D and Kolingerová, I and Žalik, B}, title = {State-of-the-Art Trends in Data Compression: COMPROMISE Case Study.}, journal = {Entropy (Basel, Switzerland)}, volume = {26}, number = {12}, pages = {}, pmid = {39766661}, issn = {1099-4300}, support = {J2-4458 and P2-0041//Slovene Research and Innovation Agency/ ; 23-04622L//Czech Science Foundation/ ; }, abstract = {After a boom that coincided with the advent of the internet, digital cameras, digital video and audio storage and playback devices, the research on data compression has rested on its laurels for a quarter of a century. Domain-dependent lossy algorithms of the time, such as JPEG, AVC, MP3 and others, achieved remarkable compression ratios and encoding and decoding speeds with acceptable data quality, which has kept them in common use to this day. However, recent computing paradigms such as cloud computing, edge computing, the Internet of Things (IoT), and digital preservation have gradually posed new challenges, and, as a consequence, development trends in data compression are focusing on concepts that were not previously in the spotlight. In this article, we try to critically evaluate the most prominent of these trends and to explore their parallels, complementarities, and differences. Digital data restoration mimics the human ability to omit memorising information that is satisfactorily retrievable from the context. Feature-based data compression introduces a two-level data representation with higher-level semantic features and with residuals that correct the feature-restored (predicted) data. The integration of the advantages of individual domain-specific data compression methods into a general approach is also challenging. To the best of our knowledge, a method that addresses all these trends does not exist yet. Our methodology, COMPROMISE, has been developed exactly to make as many solutions to these challenges as possible interoperable. It incorporates features and digital restoration. Furthermore, it is largely domain-independent (general), asymmetric, and universal. The latter refers to the ability to compress data in a common framework in a lossy, lossless, and near-lossless mode.
COMPROMISE may also be considered an umbrella that links many existing domain-dependent and independent methods, supports hybrid lossless-lossy techniques, and encourages the development of new data compression algorithms.}, } @article {pmid39762874, year = {2025}, author = {Yang, M and Zhu, X and Yan, F and Huang, X and Wu, Z and Jiang, X and Huang, Y and Li, Z}, title = {Digital-based emergency prevention and control system: enhancing infection control in psychiatric hospitals.}, journal = {BMC medical informatics and decision making}, volume = {25}, number = {1}, pages = {7}, pmid = {39762874}, issn = {1472-6947}, support = {2022-YF05-01867-SN//Chengdu Science and Technology Bureau/ ; 62073058, 62373079//National Natural Science Foundation of China/ ; 24ZYZYTS0107, 25NSFSC0577//Science and Technology Department of Sichuan Province/ ; 24CXTD11//Health Commission of Sichuan Province/ ; S23012//Sichuan medical association/ ; 2021057, 2024141//Chengdu Municipal Health Commission/ ; 2019B030316001//the Science and Technology Plan Project of Guangdong Province/ ; No.VRLAB2022 B02//Guangzhou municipal key discipline in medicine (2021-2023) and Open Project Program of State Key Laboratory of Virtual Reality Technology and Systems, Beihang University/ ; 21-K03//Shanghai Key Laboratory of Psychotic Disorders Open Grant/ ; }, mesh = {Humans ; *Hospitals, Psychiatric ; *Infection Control/methods ; COVID-19/prevention & control ; Cross Infection/prevention & control ; }, abstract = {BACKGROUND: The practical application of infectious disease emergency plans in mental health institutions during the ongoing pandemic has revealed significant shortcomings. These manifest as chaotic management of mental health care, a lack of hospital infection prevention and control (IPC) knowledge among medical staff, and unskilled practical operation. These factors result in suboptimal decision-making and emergency response execution. Consequently, we have developed a digital-based emergency prevention and control system to reinforce IPC management in psychiatric hospitals and enhance the hospital IPC capabilities of medical staff.

METHODS: The system incorporates modern technologies such as cloud computing, big data, streaming media, and knowledge graphs. A cloud service platform was established at the PaaS layer using Docker container technology to manage infectious disease emergency-related services. The system provides application services to various users through a Browser/Server Architecture. The system was implemented in a class A tertiary mental health center from March 1st, 2022, to February 28th, 2023. Twelve months of emergency IPC training and education were conducted based on the system. The system's functions and the users' IPC capabilities were evaluated.

RESULTS: A total of 116 employees participated in using the system. The system performance evaluation indicated that functionality (3.78 ± 0.68), practicality (4.02 ± 0.74), reliability (3.45 ± 0.50), efficiency (4.14 ± 0.69), accuracy (3.36 ± 0.58), and assessability (3.05 ± 0.47) met basic levels (> 3), with efficiency and practicality achieving a good level (> 4). After 12 months of training and study based on the system, the participants demonstrated improved emergency knowledge (χ[2] = 37.69, p < 0.001) and skills (p < 0.001).
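The before-and-after knowledge comparison reported here is a textbook contingency-table test; a sketch with SciPy on invented counts (the paper's raw counts are not given):

```python
from scipy.stats import chi2_contingency

# Rows: before vs after training; columns: pass vs fail on the knowledge
# test. Counts are invented for illustration only.
table = [[45, 71],
         [98, 18]]
chi2, p, dof, expected = chi2_contingency(table)
print(f"chi2={chi2:.2f}, dof={dof}, p={p:.2g}")
```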

CONCLUSION: The findings of this study indicate that the digital-based emergency IPC system has the potential to enhance the emergency IPC knowledge base and operational skills of medical personnel in psychiatric hospitals. Furthermore, the medical personnel appear to adapt well to the system. Consequently, the system has the capacity to facilitate the emergency IPC response of psychiatric institutions to infectious diseases, while simultaneously optimising the training and educational methodologies employed in emergency prevention and control. The promotion and application of this system in psychiatric institutions has the potential to accelerate the digitalisation and intelligent transformation of psychiatric hospitals.}, } @article {pmid39759485, year = {2024}, author = {Vandewinckele, L and Benazzouz, C and Delombaerde, L and Pape, L and Reynders, T and Van der Vorst, A and Callens, D and Verstraete, J and Baeten, A and Weltens, C and Crijns, W}, title = {Pro-active risk analysis of an in-house developed deep learning based autoplanning tool for breast Volumetric Modulated Arc Therapy.}, journal = {Physics and imaging in radiation oncology}, volume = {32}, number = {}, pages = {100677}, pmid = {39759485}, issn = {2405-6316}, abstract = {BACKGROUND AND PURPOSE: With the increasing amount of in-house created deep learning models in radiotherapy, it is important to know how to minimise the risks associated with the local clinical implementation prior to clinical use. The goal of this study is to give an example of how to identify the risks and find mitigation strategies to reduce these risks in an implemented workflow containing a deep learning based planning tool for breast Volumetric Modulated Arc Therapy.

MATERIALS AND METHODS: The deep learning model ran on a private Google Cloud environment for adequate computational capacity and was integrated into a workflow that could be initiated within the clinical Treatment Planning System (TPS). A proactive Failure Mode and Effect Analysis (FMEA) was conducted by a multidisciplinary team, including physicians, physicists, dosimetrists, technologists, quality managers, and the research and development team. Failure modes categorised as 'Not acceptable' and 'Tolerable' on the risk matrix were further examined to find mitigation strategies.

RESULTS: In total, 39 failure modes were defined for the total workflow, divided over four steps. Of these, 33 were deemed 'Acceptable', five 'Tolerable', and one 'Not acceptable'. Mitigation strategies, such as a case-specific Quality Assurance report, additional scripted checks and properties, a pop-up window, and time stamp analysis, reduced the failure modes to two 'Tolerable' and none in the 'Not acceptable' region.

CONCLUSIONS: The pro-active risk analysis revealed possible risks in the implemented workflow and led to the implementation of mitigation strategies that decreased the risk scores for safer clinical use.}, } @article {pmid39753651, year = {2025}, author = {Li, S and Wan, H and Yu, Q and Wang, X}, title = {Downscaling of ERA5 reanalysis land surface temperature based on attention mechanism and Google Earth Engine.}, journal = {Scientific reports}, volume = {15}, number = {1}, pages = {675}, pmid = {39753651}, issn = {2045-2322}, support = {2022YFE0203800//National Key Research and Development Program of China/ ; 2022YFE0203800//National Key Research and Development Program of China/ ; }, abstract = {Land Surface Temperature (LST) is widely recognized as a sensitive indicator of climate change, and it plays a significant role in ecological research. The ERA5-Land LST dataset, developed and managed by the European Centre for Medium-Range Weather Forecasts (ECMWF), is extensively used for global or regional LST studies. However, its fine-scale application is limited by its low spatial resolution. Therefore, to improve the spatial resolution of ERA5-Land LST data, this study proposes an Attention Mechanism U-Net (AMUN) method, which combines data acquisition and preprocessing on the Google Earth Engine (GEE) cloud computing platform, to downscale the hourly monthly mean reanalysis LST data of ERA5-Land across China's territory from 0.1° to 0.01°. This method comprehensively considers the relationship between the LST and surface features, organically combining multiple deep learning modules, including the Global Multi-Factor Cross-Attention (GMFCA) module, the Feature Fusion Residual Dense Block (FFRDB) connection module, and the U-Net module. In addition, the Bayesian global optimization algorithm is used to select the optimal hyperparameters of the network in order to enhance the predictive performance of the model. Finally, the downscaling accuracy of the network was evaluated through simulated data experiments and real data experiments and compared with the Random Forest (RF) method. The results show that the network proposed in this study outperforms the RF method, with RMSE reduced by approximately 32-51%. The downscaling method proposed in this study can effectively improve the accuracy of ERA5-Land LST downscaling, providing new insights for LST downscaling research.}, } @article {pmid39735232, year = {2024}, author = {Belbase, P and Bhusal, R and Ghimire, SS and Sharma, S and Banskota, B}, title = {Assuring assistance to healthcare and medicine: Internet of Things, Artificial Intelligence, and Artificial Intelligence of Things.}, journal = {Frontiers in artificial intelligence}, volume = {7}, number = {}, pages = {1442254}, pmid = {39735232}, issn = {2624-8212}, abstract = {INTRODUCTION: The convergence of healthcare with the Internet of Things (IoT) and Artificial Intelligence (AI) is reshaping medical practice, promising enhanced data-driven insights, automated decision-making, and remote patient monitoring. These technologies have the transformative potential to revolutionize diagnosis, treatment, and patient care.
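For the ERA5-Land downscaling study above, the Random Forest baseline follows a common statistical-downscaling pattern: fit coarse-scale LST against surface predictors, then predict on the fine grid. A schematic sketch with synthetic arrays standing in for the real predictors:

```python
import numpy as np
from sklearn.ensemble import RandomForestRegressor

rng = np.random.default_rng(1)

# Coarse-scale (0.1 degree) training pairs: surface predictors -> LST (K).
# Columns are synthetic stand-ins for NDVI, elevation, latitude, longitude.
X_coarse = rng.normal(size=(2000, 4))
lst_coarse = 290 + 5 * X_coarse[:, 0] - 3 * X_coarse[:, 1] + rng.normal(0, 1, 2000)

rf = RandomForestRegressor(n_estimators=200, random_state=0)
rf.fit(X_coarse, lst_coarse)

# Apply at the fine scale (0.01 degree): same predictors on a finer grid.
X_fine = rng.normal(size=(10, 4))
print(rf.predict(X_fine).round(1))  # downscaled LST estimates
```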

PURPOSE: This study aims to explore the integration of IoT and AI in healthcare, outlining their applications, benefits, challenges, and potential risks. By synthesizing existing literature, this study aims to provide insights into the current landscape of AI, IoT, and AIoT in healthcare, identify areas for future research and development, and establish a framework for the effective use of AI in health.

METHOD: A comprehensive literature review drew on indexed databases such as PubMed/Medline, Scopus, and Google Scholar. Key search terms related to IoT, AI, healthcare, and medicine were employed to identify relevant studies. Papers were screened based on their relevance to the specified themes, and a final set of papers was methodically chosen for this review.

RESULTS: The integration of IoT and AI in healthcare offers significant advancements, including remote patient monitoring, personalized medicine, and operational efficiency. Wearable sensors, cloud-based data storage, and AI-driven algorithms enable real-time data collection, disease diagnosis, and treatment planning. However, challenges such as data privacy, algorithmic bias, and regulatory compliance must be addressed to ensure responsible deployment of these technologies.

CONCLUSION: Integrating IoT and AI in healthcare holds immense promise for improving patient outcomes and optimizing healthcare delivery. Despite challenges such as data privacy concerns and algorithmic biases, the transformative potential of these technologies cannot be overstated. Clear governance frameworks, transparent AI decision-making processes, and ethical considerations are essential to mitigate risks and harness the full benefits of IoT and AI in healthcare.}, } @article {pmid39732323, year = {2024}, author = {Dommer, J and Van Doorslaer, K and Afrasiabi, C and Browne, K and Ezeji, S and Kim, L and Dolan, M and McBride, AA}, title = {PaVE 2.0: Behind the Scenes of the Papillomavirus Episteme.}, journal = {Journal of molecular biology}, volume = {}, number = {}, pages = {168925}, doi = {10.1016/j.jmb.2024.168925}, pmid = {39732323}, issn = {1089-8638}, abstract = {The Papilloma Virus Episteme (PaVE) https://pave.niaid.nih.gov/ was initiated by NIAID in 2008 to provide a highly curated bioinformatic and knowledge resource for the papillomavirus scientific community. It rapidly became the fundamental and core resource for papillomavirus researchers and clinicians worldwide. Over time, the software infrastructure became severely outdated. In PaVE 2.0, the underlying libraries and hosting platform have been completely upgraded and rebuilt using Amazon Web Services (AWS) tools and automated CI/CD (continuous integration and deployment) pipelines for deployment of the application and data (now in AWS S3 cloud storage). PaVE 2.0 is hosted on three AWS ECS (Elastic Container Service) services using the NIAID Operations & Engineering Branch's Monarch tech stack and Terraform. A new Celery queue supports longer running tasks. The framework is Python Flask with a JavaScript/JINJA template front end, and the database switched from MySQL to Neo4j. A Swagger API (Application Programming Interface) performs database queries and executes jobs for BLAST, MAFFT, and the L1 typing tool, and will allow future programmatic data access. All major tools such as BLAST, the L1 typing tool, genome locus viewer, phylogenetic tree generator, multiple sequence alignment, and protein structure viewer were modernized and enhanced to support more users. Multiple sequence alignment uses MAFFT instead of COBALT. The protein structure viewer was changed from Jmol to Mol*, the new embeddable viewer used by RCSB (Research Collaboratory for Structural Bioinformatics). In summary, PaVE 2.0 allows us to continue to provide this essential resource with an open-source framework that could be used as a template for molecular biology databases of other viruses.}, } @article {pmid39730563, year = {2024}, author = {Dugyala, R and Chithaluru, P and Ramchander, M and Kumar, S and Yadav, A and Yadav, NS and Elminaam, DSA and Alsekait, DM}, title = {Secure cloud computing: leveraging GNN and leader K-means for intrusion detection optimization.}, journal = {Scientific reports}, volume = {14}, number = {1}, pages = {30906}, doi = {10.1038/s41598-024-81442-7}, pmid = {39730563}, issn = {2045-2322}, support = {PNURSP2024R435//Princess Nourah Bint Abdulrahman University/ ; }, abstract = {Over the past two decades, cloud computing has experienced exponential growth, becoming a critical resource for organizations and individuals alike. However, this rapid adoption has introduced significant security challenges, particularly in intrusion detection, where traditional systems often struggle with low detection accuracy and high processing times.
To address these limitations, this research proposes an optimized Intrusion Detection System (IDS) that leverages Graph Neural Networks and the Leader K-means clustering algorithm. The primary aim of the study is to enhance both the accuracy and efficiency of intrusion detection within cloud environments. Key contributions of this work include the integration of the Leader K-means algorithm for effective data clustering, improving the IDS's ability to differentiate between normal and malicious activities. Additionally, the study introduces an optimized Grasshopper Optimization algorithm, which enhances the performance of the Optimal Neural Network, further refining detection accuracy. For added data security, the system incorporates Advanced Encryption Standard encryption and steganography, ensuring robust protection of sensitive information. The proposed solution has been implemented on the Java platform with CloudSim support, and the findings demonstrate a significant improvement in both detection accuracy and processing efficiency compared to existing methods. This research presents a comprehensive solution to the ongoing security challenges in cloud computing, offering a valuable contribution to the field.}, } @article {pmid39730383, year = {2024}, author = {Ahmad, SZ and Qamar, F}, title = {A hybrid AI based framework for enhancing security in satellite based IoT networks using high performance computing architecture.}, journal = {Scientific reports}, volume = {14}, number = {1}, pages = {30695}, pmid = {39730383}, issn = {2045-2322}, abstract = {IoT device security has become a major concern as a result of the rapid expansion of the Internet of Things (IoT) and the growing adoption of cloud computing for central monitoring and management. In order to provide centrally managed services, each IoT device has to connect to its respective High-Performance Computing (HPC) cloud. The ever-increasing deployment of Internet of Things (IoT) devices linked to HPC clouds uses various media, such as wired and wireless links. The security challenges increase further when these devices communicate over satellite links. This satellite-based IoT-HPC cloud architecture poses new security concerns which exacerbate this problem. An intrusion detection technology integrated into the central cloud is suggested as a potential remedy to monitor and detect aberrant activity within the network in order to allay these worries. However, the enormous amounts of data generated by IoT devices and their constrained computing power do not allow IDS techniques to be implemented at the source and render typical centralized Intrusion Detection Systems (IDS) ineffective. Moreover, to protect these systems, powerful intrusion detection techniques are required due to the inherent vulnerabilities of IoT devices and the possible hazards during data transmission. The literature survey revealed that existing work detects only a few types of attacks using conventional IDS models. Computational expense in terms of processing time is also an important parameter to be considered. This work introduces a novel Embedded Hybrid Intrusion Detection technique (EHID) based on embedded hybrid deep learning, created specifically for IoT devices linked to HPC clouds via satellite connectivity.
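For the GNN-based IDS above, the "Leader" half of Leader K-means is commonly understood as the classic single-pass leader clustering that seeds K-means cheaply; a sketch of that reading (our interpretation, not the paper's exact algorithm):

```python
import numpy as np

def leader_clustering(points, threshold):
    """Classic leader clustering: scan once, assign each point to the first
    'leader' within threshold, else promote it to a new leader. Often used
    as a fast seeding step before K-means in IDS pipelines."""
    leaders, labels = [], []
    for p in points:
        for i, lead in enumerate(leaders):
            if np.linalg.norm(p - lead) <= threshold:
                labels.append(i)
                break
        else:
            leaders.append(p)
            labels.append(len(leaders) - 1)
    return np.array(leaders), labels

pts = np.array([[0.1, 0.2], [0.15, 0.22], [5.0, 5.1], [5.2, 4.9]])
leaders, labels = leader_clustering(pts, threshold=1.0)
print(len(leaders), labels)  # -> 2 clusters: [0, 0, 1, 1]
```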
Two Deep Learning (DL) algorithms are integrated into the proposed method to detect 14 types of threats with good accuracy while keeping the processing time and the number of trainable parameters in check. The method segregates normal and attack traffic. We also modify the conventional IDS approach and propose an architectural change to harness the processing power of the cloud's central server. This hybrid approach effectively detects threats by harnessing the computing power available at the HPC cloud along with leveraging the power of AI. Additionally, the proposed system enables real-time monitoring and detection of intrusions while providing monitoring and management services through HPC using IoT-generated data. Experiments on the Edge-IIoTset cyber security dataset of IoT & IIoT indicate improved detection accuracy, reduced false positives, and efficient computational performance.}, } @article {pmid39728223, year = {2024}, author = {Salcedo, E}, title = {Computer Vision-Based Gait Recognition on the Edge: A Survey on Feature Representations, Models, and Architectures.}, journal = {Journal of imaging}, volume = {10}, number = {12}, pages = {}, pmid = {39728223}, issn = {2313-433X}, abstract = {Computer vision-based gait recognition (CVGR) is a technology that has gained considerable attention in recent years due to its non-invasive, unobtrusive, and difficult-to-conceal nature. Beyond its applications in biometrics, CVGR holds significant potential for healthcare and human-computer interaction. Current CVGR systems often transmit collected data to a cloud server for machine learning-based gait pattern recognition. While effective, this cloud-centric approach can result in increased system response times. Alternatively, the emerging paradigm of edge computing, which involves moving computational processes to local devices, offers the potential to reduce latency, enable real-time surveillance, and eliminate reliance on internet connectivity. Furthermore, recent advancements in low-cost, compact microcomputers capable of handling complex inference tasks (e.g., Jetson Nano Orin, Jetson Xavier NX, and Khadas VIM4) have created exciting opportunities for deploying CVGR systems at the edge. This paper reports the state of the art in gait data acquisition modalities, feature representations, models, and architectures for CVGR systems suitable for edge computing. Additionally, this paper addresses the general limitations and highlights new avenues for future research in the promising intersection of CVGR and edge computing.}, } @article {pmid39720202, year = {2025}, author = {Chen, J and Hoops, S and Mortveit, HS and Lewis, BL and Machi, D and Bhattacharya, P and Venkatramanan, S and Wilson, ML and Barrett, CL and Marathe, MV}, title = {Epihiper-A high performance computational modeling framework to support epidemic science.}, journal = {PNAS nexus}, volume = {4}, number = {1}, pages = {pgae557}, pmid = {39720202}, issn = {2752-6542}, support = {R01 GM109718/GM/NIGMS NIH HHS/United States ; }, abstract = {This paper describes Epihiper, a state-of-the-art, high performance computational modeling framework for epidemic science. The Epihiper modeling framework supports custom disease models, and can simulate epidemics over dynamic, large-scale networks while supporting modulation of the epidemic evolution through a set of user-programmable interventions.
The nodes and edges of the social-contact network have customizable sets of static and dynamic attributes which allow the user to specify intervention target sets at a very fine-grained level; these also permit the network to be updated in response to nonpharmaceutical interventions, such as school closures. The execution of interventions is governed by trigger conditions, which are Boolean expressions formed using any of Epihiper's primitives (e.g. the current time, transmissibility) and user-defined sets (e.g. people with work activities). Rich expressiveness, extensibility, and high-performance computing responsiveness were central design goals to ensure that the framework could effectively target realistic scenarios at the scale and detail required to support the large computational designs needed by state and federal public health policymakers in their efforts to plan and respond in the event of epidemics. The modeling framework has been used to support the CDC Scenario Modeling Hub for COVID-19 response, and was a part of a hybrid high-performance cloud system that was nominated as a finalist for the 2021 ACM Gordon Bell Special Prize for high performance computing-based COVID-19 Research.}, } @article {pmid39696687, year = {2024}, author = {Blindenbach, J and Kang, J and Hong, S and Karam, C and Lehner, T and Gürsoy, G}, title = {SQUiD: ultra-secure storage and analysis of genetic data for the advancement of precision medicine.}, journal = {Genome biology}, volume = {25}, number = {1}, pages = {314}, pmid = {39696687}, issn = {1474-760X}, support = {R00 HG010909/HG/NHGRI NIH HHS/United States ; R35 GM147004/GM/NIGMS NIH HHS/United States ; R35GM147004/GM/NIGMS NIH HHS/United States ; R00HG010909/HG/NHGRI NIH HHS/United States ; }, mesh = {*Precision Medicine ; Humans ; Computer Security ; Databases, Genetic ; Cloud Computing ; Information Storage and Retrieval ; }, abstract = {Cloud computing allows storing the ever-growing genotype-phenotype datasets crucial for precision medicine. Due to the sensitive nature of this data and varied laws and regulations, additional security measures are needed to ensure data privacy. We develop SQUiD, a secure queryable database for storing and analyzing genotype-phenotype data. SQUiD allows storage and secure querying of data in a low-security, low-cost public cloud using homomorphic encryption in a multi-client setting. We demonstrate SQUiD's practical usability and scalability using synthetic and UK Biobank data.}, } @article {pmid39688784, year = {2024}, author = {Ma'moun, S and Farag, R and Abutaleb, K and Metwally, A and Ali, A and Yones, M}, title = {Habitat Suitability Modelling for the Red Dwarf Honeybee (Apis florea (Linnaeus)) and Its Distribution Prediction Using Machine Learning and Cloud Computing.}, journal = {Neotropical entomology}, volume = {54}, number = {1}, pages = {18}, pmid = {39688784}, issn = {1678-8052}, mesh = {Animals ; Bees/physiology ; *Ecosystem ; Egypt ; *Machine Learning ; *Animal Distribution ; Temperature ; }, abstract = {Apis florea bees were recently identified in Egypt, marking the second occurrence of this species on the African continent. The objective of this study was to track the distribution of A. florea in Egypt and evaluate its potential for invasive behaviour. Field surveys were conducted over a 2-year period, resulting in the collection of data on the spatial distribution of the red dwarf honeybees. 
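Epihiper's trigger conditions, Boolean expressions over simulation primitives and user-defined sets, can be mimicked in a few lines; a toy evaluator under our own framing, not Epihiper's actual syntax:

```python
# Toy trigger-condition evaluator in the spirit of Epihiper: a Boolean
# expression over simulation primitives (time, transmissibility) and
# user-defined sets. Our framing, not Epihiper's actual syntax.
state = {"time": 42, "transmissibility": 1.8,
         "school_age": {"p1", "p7"}, "infected": {"p7", "p9"}}

def trigger(s):
    """Fire when transmissibility is high and any school-age person is infected."""
    return s["transmissibility"] > 1.5 and bool(s["school_age"] & s["infected"])

if trigger(state):
    print("apply intervention: school closure")  # fires for this state
```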
A comprehensive analysis was performed utilizing long-term monthly temperature and rainfall data to generate spatially interpolated climate surfaces with a 1-km resolution. Vegetation variables derived from Terra MODIS were also incorporated. Furthermore, elevation data obtained from the Shuttle Radar Topography Mission were utilized to derive slope, aspect, and hillshade based on the digital elevation model. The collected data were subjected to resampling for optimal data smoothing. Subsequently, a random forest model was applied, followed by an accuracy assessment to evaluate the classification output. The results indicated that the mean temperature of the coldest quarter (bio11), annual mean temperature (bio01), and minimum temperature of the coldest month (bio06) were the most important temperature-derived parameters, along with annual precipitation (bio12) and precipitation of the wettest quarter (bio16) as precipitation parameters, the non-tree vegetation parameter, and elevation. The calculation of the Habitat Suitability Index revealed that the most suitable areas, covering a total of 200131.9 km[2], were predominantly situated in the eastern and northern regions of Egypt, including the Nile Delta characterized by its fertile agricultural lands and the presence of the river Nile. In contrast, the western and southern parts exhibited low habitat suitability due to the absence of significant green vegetation and low relative humidity.}, } @article {pmid39678283, year = {2024}, author = {Zhou, J and Chen, S and Kuang, H and Wang, X}, title = {Optimal robust configuration in cloud environment based on heuristic optimization algorithm.}, journal = {PeerJ. Computer science}, volume = {10}, number = {}, pages = {e2350}, pmid = {39678283}, issn = {2376-5992}, abstract = {To analyze performance in cloud computing, some unpredictable perturbations that may lead to performance degradation are essential factors that should not be neglected. To prevent performance degradation in cloud computing systems, it is reasonable to measure the impact of the perturbations and propose a robust configuration strategy to maintain the performance of the system at an acceptable level. In this article, unlike previous research focusing on profit maximization and waiting time minimization, our study starts with the bottom line of expected performance degradation due to perturbation. The bottom line is quantified as the minimum acceptable profit and the maximum acceptable waiting time, and then the corresponding feasible region is defined. By comparing the system's actual working performance with the bottom line, the concept of robustness is invoked as a guiding basis for configuring server size and speed in feasible regions, so that the performance of the cloud computing system can be maintained at an acceptable level when perturbed. Subsequently, to improve the robustness of the system as much as possible, we discuss the robustness measurement method. A heuristic optimization algorithm is proposed and compared with other heuristic optimization algorithms to verify the performance of the algorithm.
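To make the robustness-guided configuration idea above concrete, here is a toy search over server size and speed that maximizes the worst-case slack to a profit/waiting-time bottom line under arrival-rate perturbation. The queueing and profit formulas, the constants, and the exhaustive search standing in for the paper's heuristic are all illustrative assumptions:

```python
# Toy robustness-guided configuration: pick server count c and speed s so
# the worst-case slack to the bottom line (minimum profit, maximum wait)
# stays as large as possible under arrival-rate perturbation.
LAMBDA = 50.0              # nominal arrival rate (tasks/s)
PERTURB = 0.15             # +/-15% perturbation on arrivals
MIN_PROFIT, MAX_WAIT = 50.0, 0.5   # assumed bottom-line thresholds

def wait(c, s, lam):       # crude M/M/1-style delay for aggregate capacity
    cap = c * s
    return float("inf") if cap <= lam else 1.0 / (cap - lam)

def profit(c, s, lam):     # revenue per task minus server operating cost
    return 3.0 * lam - (1.2 * c + 0.8 * c * s)

def robustness(c, s):      # worst-case slack over the perturbation range
    lams = (LAMBDA * (1 - PERTURB), LAMBDA, LAMBDA * (1 + PERTURB))
    return min(min(profit(c, s, lam) - MIN_PROFIT,
                   MAX_WAIT - wait(c, s, lam)) for lam in lams)

candidates = ((c, 1.0 + 0.5 * k) for c in range(1, 31) for k in range(10))
best = max(candidates, key=lambda cs: robustness(*cs))
print("best (servers, speed):", best, "robustness:", round(robustness(*best), 3))
```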
Experimental results show that the magnitude error of the solution of our algorithm compared with the most advanced benchmark scheme is on the order of 10[-6], indicating the accuracy of our solution.}, } @article {pmid39671840, year = {2025}, author = {Mou, T and Liu, Y}, title = {Utilizing the cloud-based satellite platform to explore the dynamics of coastal aquaculture ponds from 1986 to 2020 in Shandong Province, China.}, journal = {Marine pollution bulletin}, volume = {211}, number = {}, pages = {117414}, doi = {10.1016/j.marpolbul.2024.117414}, pmid = {39671840}, issn = {1879-3363}, mesh = {China ; *Aquaculture ; *Ponds ; *Environmental Monitoring/methods ; *Satellite Imagery ; Remote Sensing Technology ; Wetlands ; Ecosystem ; }, abstract = {Coastal pond aqua farming is critical in aquaculture and significantly contributes to the seafood supply. Meanwhile, the development of aquaculture ponds also threatens vulnerable wetland resources and coastal ecosystems. Accurate statistics regarding the distribution and variability of coastal pond aquaculture are crucial for balancing the sustainable development of coastal aquaculture and preserving the coastal environment and ecosystems. Satellite imagery offers a valuable tool for detecting spatial-temporal information related to these coastal ponds. However, integrating multiple remote sensing images to acquire comprehensive spatial information about the coastal ponds remains challenging. This study utilized a decision-tree classifier applied to Landsat data to detect the spatial distribution of coastal ponds in Shandong Province from 1986 to 2020, with data analyzed at five-year intervals, primarily based on the Google Earth Engine cloud platform. A pond map in 2020, extracted from Sentinel-2 imagery, was used as a reference map and combined with the results from Landsat data to explore the landscape changes of coastal ponds. The results indicated that Shandong Province's coastal pond area underwent significant expansion before 1990, followed by slower growth from 1990 to 2010 and eventual shrinkage after 2010. Specifically, the pond area expanded from 428.38 km[2] in 1986 to a peak of 2149.51 km[2] in 2010 before contracting to 2012.39 km[2] in 2020. The region near Bohai Bay emerged as the epicenter of Shandong's coastal aquaculture, encompassing 62 % of the total pond area in 2020. Government policies previously promoted the expansion of coastal pond farming but later shifted to curbing the uncontrolled development of aquaculture ponds.}, } @article {pmid39670410, year = {2025}, author = {Alipio, K and García-Colón, J and Boscarino, N and Fox, K}, title = {Indigenous Data Sovereignty, Circular Systems, and Solarpunk Solutions for a Sustainable Future.}, journal = {Pacific Symposium on Biocomputing. Pacific Symposium on Biocomputing}, volume = {30}, number = {}, pages = {717-733}, pmid = {39670410}, issn = {2335-6936}, mesh = {*Artificial Intelligence ; *Computational Biology ; Humans ; Cloud Computing/statistics & numerical data ; Sustainable Development ; Indigenous Peoples/statistics & numerical data ; Conservation of Natural Resources ; }, abstract = {Recent advancements in Artificial Intelligence (AI) and data center infrastructure have brought the global cloud computing market to the forefront of conversations about sustainability and energy use.
Current policy and infrastructure for data centers prioritize economic gain and resource extraction, inherently unsustainable models which generate massive amounts of energy and heat waste. Our team proposes the formation of policy around earth-friendly computation practices rooted in Indigenous models of circular systems of sustainability. By looking to alternative systems of sustainability rooted in Indigenous values of aloha 'āina, or love for the land, we find examples of traditional ecological knowledge (TEK) that can be imagined alongside Solarpunk visions for a more sustainable future. One in which technology works with the environment, reusing electronic waste (e-waste) and improving data life cycles.}, } @article {pmid39670372, year = {2025}, author = {Ramwala, OA and Lowry, KP and Hippe, DS and Unrath, MPN and Nyflot, MJ and Mooney, SD and Lee, CI}, title = {ClinValAI: A framework for developing Cloud-based infrastructures for the External Clinical Validation of AI in Medical Imaging.}, journal = {Pacific Symposium on Biocomputing. Pacific Symposium on Biocomputing}, volume = {30}, number = {}, pages = {215-228}, pmid = {39670372}, issn = {2335-6936}, mesh = {Humans ; *Cloud Computing ; *Algorithms ; *Computational Biology ; Female ; *Artificial Intelligence ; *Diagnostic Imaging/standards/statistics & numerical data ; Workflow ; Mammography/statistics & numerical data/standards/methods ; Breast Neoplasms/diagnostic imaging ; Validation Studies as Topic ; }, abstract = {Artificial Intelligence (AI) algorithms showcase the potential to steer a paradigm shift in clinical medicine, especially medical imaging. Concerns associated with model generalizability and biases necessitate rigorous external validation of AI algorithms prior to their adoption into clinical workflows. To address the barriers associated with patient privacy, intellectual property, and diverse model requirements, we introduce ClinValAI, a framework for establishing robust cloud-based infrastructures to clinically validate AI algorithms in medical imaging. By featuring dedicated workflows for data ingestion, algorithm scoring, and output processing, we propose an easily customizable method to assess AI models and investigate biases. Our novel orchestration mechanism facilitates utilizing the complete potential of the cloud computing environment. ClinValAI's input auditing and standardization mechanisms ensure that inputs consistent with model prerequisites are provided to the algorithm for a streamlined validation. The scoring workflow comprises multiple steps to facilitate consistent inferencing and systematic troubleshooting. The output processing workflow helps identify and analyze samples with missing results and aggregates final outputs for downstream analysis. We demonstrate the usability of our work by evaluating a state-of-the-art breast cancer risk prediction algorithm on a large and diverse dataset of 2D screening mammograms. We perform comprehensive statistical analysis to study model calibration and evaluate performance on important factors, including breast density, age, and race, to identify latent biases. 
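The subgroup bias analysis described for ClinValAI can be sketched with standard scikit-learn metrics; the data here are synthetic and the stratification variable is a stand-in for factors like breast density (this is not ClinValAI code):

```python
# Synthetic illustration of subgroup discrimination and calibration
# checks of the kind an external validation would run.
import numpy as np
from sklearn.calibration import calibration_curve
from sklearn.metrics import roc_auc_score

rng = np.random.default_rng(0)
n = 2000
group = rng.integers(0, 4, n)                      # e.g. breast density bins
risk = np.clip(0.05 + 0.03 * group + rng.beta(2, 8, n) * 0.3, 0, 1)
y = rng.binomial(1, risk)                          # synthetic outcomes
score = np.clip(risk + rng.normal(0, 0.05, n), 0, 1)  # "model" output

for g in range(4):                                 # discrimination by subgroup
    m = group == g
    print(f"group {g}: AUC = {roc_auc_score(y[m], score[m]):.3f}")

prob_true, prob_pred = calibration_curve(y, score, n_bins=10)
print("calibration bins (predicted -> observed):")
for p, o in zip(prob_pred, prob_true):
    print(f"  {p:.2f} -> {o:.2f}")
```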
ClinValAI provides a holistic framework to validate medical imaging models and has the potential to advance the development of generalizable AI models in clinical medicine and promote health equity.}, } @article {pmid39670335, year = {2024}, author = {Anderson, W and Bhatnagar, R and Scollick, K and Schito, M and Walls, R and Podichetty, JT}, title = {Real-world evidence in the cloud: Tutorial on developing an end-to-end data and analytics pipeline using Amazon Web Services resources.}, journal = {Clinical and translational science}, volume = {17}, number = {12}, pages = {e70078}, pmid = {39670335}, issn = {1752-8062}, mesh = {*Cloud Computing ; Humans ; Drug Development/methods ; Internet ; Databases, Factual/statistics & numerical data ; Software ; Machine Learning ; }, abstract = {In the rapidly evolving landscape of healthcare and drug development, the ability to efficiently collect, process, and analyze large volumes of real-world data (RWD) is critical for advancing drug development. This article provides a blueprint for establishing an end-to-end data and analytics pipeline in a cloud-based environment. The pipeline presented here includes four major components, including data ingestion, transformation, visualization, and analytics, each supported by a suite of Amazon Web Services (AWS) tools. The pipeline is exemplified through the CURE ID platform, a collaborative tool designed to capture and analyze real-world, off-label treatment administrations. By using services such as AWS Lambda, Amazon Relational Database Service (RDS), Amazon QuickSight, and Amazon SageMaker, the pipeline facilitates the ingestion of diverse data sources, the transformation of raw data into structured formats, the creation of interactive dashboards for data visualization, and the application of advanced machine learning models for data analytics. The described architecture not only supports the needs of the CURE ID platform, but also offers a scalable and adaptable framework that can be applied across various domains to enhance data-driven decision making beyond drug repurposing.}, } @article {pmid39669765, year = {2024}, author = {Bao, H and Yuan, M and Deng, H and Xu, J and Zhao, Y}, title = {Secure multiparty computation protocol based on homomorphic encryption and its application in blockchain.}, journal = {Heliyon}, volume = {10}, number = {14}, pages = {e34458}, pmid = {39669765}, issn = {2405-8440}, abstract = {Blockchain technology is a key technology in the current information field and has been widely used in various industries. Blockchain technology faces significant challenges in privacy protection while ensuring data immutability and transparency, so it is crucial to implement private computing in blockchain. To target the privacy issues in blockchain, we design a secure multi-party computation (SMPC) protocol DHSMPC based on homomorphic encryption in this paper. On the one hand, homomorphic encryption technology can directly operate on ciphertext, solving the privacy problem in the blockchain. On the other hand, this paper designs the directed decryption function of DHSMPC to resist malicious opponents in the CRS model, so that authorized users who do not participate in the calculation can also access the decryption results of secure multi-party computation. Analytical and experimental results show that DHSMPC has smaller ciphertext size and stronger performance than existing SMPC protocols. 
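For readers unfamiliar with secure multi-party computation, the following toy additive secret-sharing example conveys the general idea of computing on data no single party can see. DHSMPC itself is built on homomorphic encryption with directed decryption, which this sketch does not reproduce:

```python
# Toy secure multi-party summation via additive secret sharing; this
# illustrates the SMPC setting generally, not the DHSMPC protocol.
import random

Q = 2**61 - 1   # public modulus for the share arithmetic

def share(secret, n):
    """Split a secret into n additive shares modulo Q."""
    parts = [random.randrange(Q) for _ in range(n - 1)]
    parts.append((secret - sum(parts)) % Q)
    return parts

def reconstruct(parts):
    return sum(parts) % Q

# Three parties jointly total their private inputs without revealing them.
inputs = [120, 340, 75]
shares = [share(x, 3) for x in inputs]             # each party shares its value
partials = [sum(s[i] for s in shares) % Q for i in range(3)]  # per-party sums
assert reconstruct(partials) == sum(inputs)
print("joint total:", reconstruct(partials))
```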
The protocol makes it possible to implement complex calculations in multi-party scenarios and is proven to be resistant to various semi-malicious attacks, ensuring data security and privacy. Finally, this article combines the designed DHSMPC protocol with blockchain and cloud computing, showing how to use this solution to achieve trusted data management in specific scenarios.}, } @article {pmid39669685, year = {2024}, author = {Oh, S and Gravel-Pucillo, K and Ramos, M and Schatz, MC and Davis, S and Carey, V and Morgan, M and Waldron, L}, title = {AnVILWorkflow: A runnable workflow package for Cloud-implemented bioinformatics analysis pipelines.}, journal = {F1000Research}, volume = {13}, number = {}, pages = {1257}, pmid = {39669685}, issn = {2046-1402}, support = {U24 HG010263/HG/NHGRI NIH HHS/United States ; }, mesh = {*Workflow ; *Computational Biology/methods ; *Cloud Computing ; *Software ; Genomics/methods ; Humans ; }, abstract = {Advancements in sequencing technologies and the development of new data collection methods produce large volumes of biological data. The Genomic Data Science Analysis, Visualization, and Informatics Lab-space (AnVIL) provides a cloud-based platform for democratizing access to large-scale genomics data and analysis tools. However, utilizing the full capabilities of AnVIL can be challenging for researchers without extensive bioinformatics expertise, especially for executing complex workflows. We present the AnVILWorkflow R package, which enables the convenient execution of bioinformatics workflows hosted on AnVIL directly from an R environment. AnVILWorkflow simplifies the setup of the cloud computing environment, input data formatting, workflow submission, and retrieval of results through intuitive functions. We demonstrate the utility of AnVILWorkflow for three use cases: bulk RNA-seq analysis with Salmon, metagenomics analysis with bioBakery, and digital pathology image processing with PathML. The key features of AnVILWorkflow include user-friendly browsing of available data and workflows, seamless integration of R and non-R tools within a reproducible analysis pipeline, and accessibility to scalable computing resources without direct management overhead. AnVILWorkflow lowers the barrier to utilizing AnVIL's resources, especially for exploratory analyses or bulk processing with established workflows. This empowers a broader community of researchers to leverage the latest genomics tools and datasets using familiar R syntax. This package is distributed through the Bioconductor project (https://bioconductor.org/packages/AnVILWorkflow), and the source code is available through GitHub (https://github.com/shbrief/AnVILWorkflow).}, } @article {pmid39666605, year = {2024}, author = {Bano, S and Abbas, G and Bilal, M and Abbas, ZH and Ali, Z and Waqas, M}, title = {PHyPO: Priority-based Hybrid task Partitioning and Offloading in mobile computing using automated machine learning.}, journal = {PloS one}, volume = {19}, number = {12}, pages = {e0314198}, pmid = {39666605}, issn = {1932-6203}, mesh = {*Machine Learning ; *Algorithms ; *Cloud Computing ; Humans ; }, abstract = {With the increasing demand for mobile computing, the requirement for intelligent resource management has also increased. Cloud computing lessens the energy consumption of user equipment, but it increases the latency of the system. Whereas edge computing reduces the latency along with the energy consumption, it has limited resources and cannot process bigger tasks. 
To resolve these issues, a Priority-based Hybrid task Partitioning and Offloading (PHyPO) scheme is introduced in this paper, which prioritizes tasks with high time sensitivity and offloads them intelligently. It also calculates the optimal number of partitions a task can be divided into. Resource utility is maximized and the processing capability of the model is increased by using a hybrid architecture consisting of mobile devices, edge servers, and cloud servers. Automated machine learning is used to identify the optimal classification models and tune their hyper-parameters, resulting in adaptive boosting ensemble learning-based models that reduce the time complexity of the system to O(1). The results of the proposed algorithm show a significant improvement over benchmark techniques along with achieving an accuracy of 96.1% for the optimal partitioning model and 94.3% for the optimal offloading model, with both results achieved in significantly less or equal time compared to the benchmark techniques.}, } @article {pmid39664397, year = {2024}, author = {Katapally, TR}, title = {It's late, but not too late to transform health systems: a global digital citizen science observatory for local solutions to global problems.}, journal = {Frontiers in digital health}, volume = {6}, number = {}, pages = {1399992}, pmid = {39664397}, issn = {2673-253X}, abstract = {A key challenge in monitoring, managing, and mitigating global health crises is the need to coordinate clinical decision-making with systems outside of healthcare. In the 21st century, human engagement with Internet-connected ubiquitous devices generates an enormous amount of big data, which can be used to address complex, intersectoral problems via participatory epidemiology and mHealth approaches that can be operationalized with digital citizen science. These big data, which traditionally exist outside of health systems, are underutilized even though their usage can have significant implications for prediction and prevention of communicable and non-communicable diseases. To address critical challenges and gaps in big data utilization across sectors, a Digital Citizen Science Observatory (DiScO) is being developed by the Digital Epidemiology and Population Health Laboratory by scaling up existing digital health infrastructure. DiScO's development is informed by the Smart Framework, which leverages ubiquitous devices for ethical surveillance. The Observatory will be operationalized by implementing a rapidly adaptable, replicable, and scalable progressive web application that repurposes jurisdiction-specific cloud infrastructure to address crises across jurisdictions. The Observatory is designed to be highly adaptable for both rapid data collection as well as rapid responses to emerging and existing crises. Data sovereignty and decentralization of technology are core aspects of the observatory, where citizens can own the data they generate, and researchers and decision-makers can re-purpose digital health infrastructure.
The ultimate aim of DiScO is to transform health systems by breaking existing jurisdictional silos in addressing global health crises.}, } @article {pmid39663386, year = {2024}, author = {Parente, L and Sloat, L and Mesquita, V and Consoli, D and Stanimirova, R and Hengl, T and Bonannella, C and Teles, N and Wheeler, I and Hunter, M and Ehrmann, S and Ferreira, L and Mattos, AP and Oliveira, B and Meyer, C and Şahin, M and Witjes, M and Fritz, S and Malek, Z and Stolle, F}, title = {Annual 30-m maps of global grassland class and extent (2000-2022) based on spatiotemporal Machine Learning.}, journal = {Scientific data}, volume = {11}, number = {1}, pages = {1303}, pmid = {39663386}, issn = {2052-4463}, support = {202548816//Deutsche Forschungsgemeinschaft (German Research Foundation)/ ; }, abstract = {The paper describes the production and evaluation of global grassland extent mapped annually for 2000-2022 at 30 m spatial resolution. The dataset showing the spatiotemporal distribution of cultivated and natural/semi-natural grassland classes was produced by using GLAD Landsat ARD-2 image archive, accompanied by climatic, landform and proximity covariates, spatiotemporal machine learning (per-class Random Forest) and over 2.3 M reference samples (visually interpreted in Very High Resolution imagery). Custom probability thresholds (based on five-fold spatial cross-validation) were used to derive dominant class maps with balanced user's and producer's accuracy, resulting in f1 score of 0.64 and 0.75 for cultivated and natural/semi-natural grassland, respectively. The produced maps (about 4 TB in size) are available under an open data license as Cloud-Optimized GeoTIFFs and as Google Earth Engine assets. The suggested uses of data include (1) integration with other compatible land cover products and (2) tracking the intensity and drivers of conversion of land to cultivated grasslands and from natural / semi-natural grasslands into other land use systems.}, } @article {pmid39653390, year = {2024}, author = {Truong, V and Moore, JE and Ricoy, UM and Verpeut, JL}, title = {Low-Cost Approaches in Neuroscience to Teach Machine Learning Using a Cockroach Model.}, journal = {eNeuro}, volume = {11}, number = {12}, pages = {}, pmid = {39653390}, issn = {2373-2822}, mesh = {Animals ; *Machine Learning ; *Cockroaches/physiology ; *Neurosciences/education ; Behavior, Animal/physiology ; Nicotine/administration & dosage ; Male ; }, abstract = {In an effort to increase access to neuroscience education in underserved communities, we created an educational program that utilizes a simple task to measure place preference of the cockroach (Gromphadorhina portentosa) and the open-source free software, SLEAP Estimates Animal Poses (SLEAP) to quantify behavior. Cockroaches (n = 18) were trained to explore a linear track for 2 min while exposed to either air, vapor, or vapor with nicotine from a port on one side of the linear track over 14 d. The time the animal took to reach the port was measured, along with distance traveled, time spent in each zone, and velocity. As characterizing behavior is challenging and inaccessible for nonexperts new to behavioral research, we created an educational program using the machine learning algorithm, SLEAP, and cloud-based (i.e., Google Colab) low-cost platforms for data analysis. We found that SLEAP was within a 0.5% margin of error when compared with manually scoring the data. 
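The spatial measures reported in the cockroach study (distance traveled, velocity, time in zone) are straightforward to derive from pose-estimation coordinates. A minimal numpy sketch, with a synthetic track and an assumed zone boundary:

```python
# Deriving distance, velocity, and zone occupancy from x-y tracking
# coordinates; SLEAP's exported coordinate arrays play a similar role.
import numpy as np

fps = 30.0                                         # assumed frame rate
rng = np.random.default_rng(1)
track = np.cumsum(rng.normal(0, 0.2, (120, 2)), axis=0)   # (frames, xy)

steps = np.linalg.norm(np.diff(track, axis=0), axis=1)    # per-frame moves
distance = steps.sum()                             # total path length
velocity = steps * fps                             # instantaneous speed
in_port_zone = track[:, 0] > 2.0                   # assumed zone boundary
time_in_zone = in_port_zone.sum() / fps            # seconds near the port

print(f"distance: {distance:.2f}  mean speed: {velocity.mean():.2f}  "
      f"time in zone: {time_in_zone:.1f} s")
```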
Cockroaches were found to have an increased aversive response to vapor alone compared with those that only received air. Using SLEAP, we demonstrate that the x-y coordinate data can be further classified into behavior using dimensionality-reducing clustering methods. This suggests that the linear track can be used to examine nicotine preference for the cockroach, and SLEAP can provide a fast, efficient way to analyze animal behavior. Moreover, this educational program is available for free for students to learn a complex machine learning algorithm without expensive hardware to study animal behavior.}, } @article {pmid39650555, year = {2024}, author = {Consoli, D and Parente, L and Simoes, R and Şahin, M and Tian, X and Witjes, M and Sloat, L and Hengl, T}, title = {A computational framework for processing time-series of earth observation data based on discrete convolution: global-scale historical Landsat cloud-free aggregates at 30 m spatial resolution.}, journal = {PeerJ}, volume = {12}, number = {}, pages = {e18585}, pmid = {39650555}, issn = {2167-8359}, mesh = {*Satellite Imagery/methods ; Earth, Planet ; Environmental Monitoring/methods ; Algorithms ; }, abstract = {Processing large collections of earth observation (EO) time-series, often petabyte-sized, such as NASA's Landsat and ESA's Sentinel missions, can be computationally prohibitive and costly. Despite their name, even the Analysis Ready Data (ARD) versions of such collections can rarely be used as direct input for modeling because of cloud presence and/or prohibitive storage size. Existing solutions for readily using these data are not openly available, are poor in performance, or lack flexibility. Addressing this issue, we developed TSIRF (Time-Series Iteration-free Reconstruction Framework), a computational framework that can be used to apply diverse time-series processing tasks, such as temporal aggregation and time-series reconstruction by simply adjusting the convolution kernel. As the first large-scale application, TSIRF was employed to process the entire Global Land Analysis and Discovery (GLAD) ARD Landsat archive, producing a cloud-free bi-monthly aggregated product. This process, covering seven Landsat bands globally from 1997 to 2022, with more than two trillion pixels and for each one a time-series of 156 samples in the aggregated product, required approximately 28 hours of computation using 1248 Intel[®] Xeon[®] Gold 6248R CPUs. The quality of the result was assessed using a benchmark dataset derived from the aggregated product and comparing different imputation strategies. The resulting reconstructed images can be used as input for machine learning models or to map biophysical indices. To further limit the storage size the produced data was saved as 8-bit Cloud-Optimized GeoTIFFs (COG). With the hosting of about 20 TB per band/index for an entire 30 m resolution bi-monthly historical time-series distributed as open data, the product enables seamless, fast, and affordable access to the Landsat archive for environmental monitoring and analysis applications.}, } @article {pmid39650512, year = {2024}, author = {Chen, H and Al-Turjman, F}, title = {Cloud-based configurable data stream processing architecture in rural economic development.}, journal = {PeerJ. 
Computer science}, volume = {10}, number = {}, pages = {e2547}, pmid = {39650512}, issn = {2376-5992}, abstract = {PURPOSE: This study aims to address the limitations of traditional data processing methods in predicting agricultural product prices, which is essential for advancing rural informatization to enhance agricultural efficiency and support rural economic growth.

METHODOLOGY: The RL-CNN-GRU framework combines reinforcement learning (RL), convolutional neural network (CNN), and gated recurrent unit (GRU) to improve agricultural price predictions using multidimensional time series data, including historical prices, weather, soil conditions, and other influencing factors. Initially, the model employs a 1D-CNN for feature extraction, followed by GRUs to capture temporal patterns in the data. Reinforcement learning further optimizes the model, enhancing the analysis and accuracy of multidimensional data inputs for more reliable price predictions.
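A minimal sketch of the 1D-CNN + GRU backbone this methodology describes, in PyTorch; the reinforcement-learning optimization stage is omitted, and all layer sizes and feature counts are assumptions:

```python
# 1D-CNN feature extraction followed by a GRU over the time axis,
# ending in a next-step price prediction (sizes are illustrative).
import torch
import torch.nn as nn

class CNNGRU(nn.Module):
    def __init__(self, n_features: int, hidden: int = 64):
        super().__init__()
        # 1D convolution extracts local patterns across the time axis.
        self.conv = nn.Sequential(
            nn.Conv1d(n_features, 32, kernel_size=3, padding=1),
            nn.ReLU(),
        )
        # GRU captures longer-range temporal structure.
        self.gru = nn.GRU(32, hidden, batch_first=True)
        self.head = nn.Linear(hidden, 1)  # next-step price

    def forward(self, x):                 # x: (batch, time, features)
        z = self.conv(x.transpose(1, 2))  # -> (batch, 32, time)
        out, _ = self.gru(z.transpose(1, 2))
        return self.head(out[:, -1])      # last time step -> prediction

# Toy batch: 8 series, 30 days, 5 drivers (price, weather, soil, ...).
model = CNNGRU(n_features=5)
x = torch.randn(8, 30, 5)
y_hat = model(x)                          # (8, 1) predicted prices
loss = nn.functional.mse_loss(y_hat, torch.randn(8, 1))
loss.backward()
```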

RESULTS: Testing on public and proprietary datasets shows that the RL-CNN-GRU framework significantly outperforms traditional models in predicting prices, with lower mean squared error (MSE) and mean absolute error (MAE) metrics.

CONCLUSION: The RL-CNN-GRU framework contributes to rural informatization by offering a more accurate prediction tool, thereby supporting improved decision-making in agricultural processes and fostering rural economic development.}, } @article {pmid39650510, year = {2024}, author = {Ur Rehman, A and Lu, S and Ashraf, MA and Iqbal, MS and Nawabi, AK and Amin, F and Abbasi, R and de la Torre, I and Gracia Villar, S and Lopez, LAD and Heyat, MBB}, title = {The role of Internet of Things (IoT) technology in modern cultivation for the implementation of greenhouses.}, journal = {PeerJ. Computer science}, volume = {10}, number = {}, pages = {e2309}, pmid = {39650510}, issn = {2376-5992}, abstract = {In recent years, the Internet of Things (IoT) has become one of the most familiar technologies, setting benchmarks and scaling new heights. IoT is indeed the future of communication; it has transformed the objects (things) of the real world into smarter devices. With the advent of IoT technology, this decade is witnessing a transformation from traditional agriculture approaches to the most advanced ones. Limited research has been carried out in this direction. Thus, herein we present various technological aspects involved in IoT-based cultivation. The role and the key components of smart farming using IoT were examined, with a focus on network technologies, including layers, protocols, topologies, network architecture, etc. We also delve into the integration of relevant technologies, such as cloud computing and big data analytics, with IoT-based cultivation. We explored various security issues in modern IoT cultivation and also emphasized the importance of safeguarding sensitive agricultural data. Additionally, a comprehensive list of applications based on sensors and mobile devices is provided, offering refined solutions for greenhouse management. The principles and regulations established by different countries for IoT-based cultivation systems are presented, demonstrating the global recognition of these technologies. Furthermore, a selection of successful use cases, real-world scenarios, and applications is presented. Finally, the open research challenges and solutions in modern IoT-based cultivation were discussed.}, } @article {pmid39650495, year = {2024}, author = {Akram, A and Anjum, F and Latif, S and Zulfiqar, MI and Nazir, M}, title = {Honey bee inspired resource allocation scheme for IoT-driven smart healthcare applications in fog-cloud paradigm.}, journal = {PeerJ. Computer science}, volume = {10}, number = {}, pages = {e2484}, pmid = {39650495}, issn = {2376-5992}, abstract = {The Internet of Things (IoT) paradigm is a foundational and integral factor for the development of smart applications in different sectors. These applications comprise a set of interconnected modules that exchange data and realize the distributed data flow (DDF) model. The execution of these modules on a distant cloud data center is prone to quality of service (QoS) degradation. This is where the fog computing philosophy comes in, bridging this gap by bringing computation closer to the IoT devices. However, resource management in the fog and optimal allocation of fog devices to application modules are critical for better resource utilization and QoS. A significant challenge in this regard is managing the fog network dynamically to determine cost-effective placement of application modules on resources.
In this study, we propose an optimal placement strategy for smart health-care application modules on fog resources. The objective of this strategy is to ensure optimal execution in terms of latency, bandwidth, and earliest completion time as compared to a few baseline techniques. A honey-bee-inspired strategy is proposed for allocating and utilizing resources for application module processing. To model the application and measure the effectiveness of our strategy, iFogSim Java-based simulation classes were extended to conduct experiments, which demonstrate satisfactory results.}, } @article {pmid39650472, year = {2024}, author = {Balaji, P and Cengiz, K and Babu, S and Alqahtani, O and Akleylek, S}, title = {Metaheuristic optimized complex-valued dilated recurrent neural network for attack detection in internet of vehicular communications.}, journal = {PeerJ. Computer science}, volume = {10}, number = {}, pages = {e2366}, pmid = {39650472}, issn = {2376-5992}, abstract = {The Internet of Vehicles (IoV) is a specialized iteration of the Internet of Things (IoT) tailored to facilitate communication and connectivity among vehicles and their environment. It harnesses the power of advanced technologies such as cloud computing, wireless communication, and data analytics to seamlessly exchange real-time data among vehicles, road-side infrastructure, traffic management systems, and other entities. The primary objectives of this real-time data exchange include enhancing road safety, reducing traffic congestion, boosting traffic flow efficiency, and enriching the driving experience. Through the IoV, vehicles can share information about traffic conditions, weather forecasts, road hazards, and other relevant data, fostering smarter, safer, and more efficient transportation networks. Developing, implementing and maintaining sophisticated techniques for detecting attacks present significant challenges and costs, which might limit their deployment, especially in smaller settings or those with constrained resources. To overcome these drawbacks, this article outlines developing an innovative attack detection model for the IoV using advanced deep learning techniques. The model aims to enhance security in vehicular networks by efficiently identifying attacks. Initially, data is collected from online databases and subjected to an optimal feature extraction process. During this phase, the Enhanced Exploitation in Hybrid Leader-based Optimization (EEHLO) method is employed to select the optimal features. These features are utilized by a Complex-Valued Dilated Recurrent Neural Network (CV-DRNN) to detect attacks within vehicle networks accurately. The performance of this novel attack detection model is rigorously evaluated and compared with that of traditional models using a variety of metrics.}, } @article {pmid39640392, year = {2024}, author = {Ojha, S and Paygude, P and Dhumane, A and Rathi, S and Bidve, V and Kumar, A and Devale, P}, title = {A method to enhance privacy preservation in cloud storage through a three-layer scheme for computational intelligence in fog computing.}, journal = {MethodsX}, volume = {13}, number = {}, pages = {103053}, pmid = {39640392}, issn = {2215-0161}, abstract = {Recent advancements in cloud computing have heightened concerns about data control and privacy due to vulnerabilities in traditional encryption methods, which may not withstand internal attacks from cloud servers.
To overcome these issues of data privacy and control over data transfer to the cloud, a novel three-tier storage model incorporating fog computing has been proposed. This framework leverages the advantages of cloud storage while enhancing data privacy. The approach uses the Hash-Solomon code algorithm to partition data into distinct segments, distributing a portion of it across local machines and fog servers, in addition to cloud storage. This distribution not only increases data privacy but also optimises storage efficiency. Computational intelligence plays a crucial role by calculating the optimal data distribution across cloud, fog, and local servers, ensuring balanced and secure data storage.•Experimental analysis of this mathematical model has demonstrated a significant improvement in storage efficiency, with increases ranging from 30 % to 40 % as the volume of data blocks grows.•This innovative framework, based on the Hash-Solomon code method, effectively addresses privacy concerns while maintaining the benefits of cloud computing, offering a robust solution for secure and efficient data management.}, } @article {pmid39637126, year = {2024}, author = {Valderrama-Landeros, L and Troche-Souza, C and Alcántara-Maya, JA and Velázquez-Salazar, S and Vázquez-Balderas, B and Villeda-Chávez, E and Cruz-López, MI and Ressl, R and Flores-Verdugo, F and Flores-de-Santiago, F}, title = {An assessment of mangrove forest in northwestern Mexico using the Google Earth Engine cloud computing platform.}, journal = {PloS one}, volume = {19}, number = {12}, pages = {e0315181}, pmid = {39637126}, issn = {1932-6203}, mesh = {Mexico ; *Wetlands ; *Avicennia ; *Cloud Computing ; Rhizophoraceae ; Conservation of Natural Resources/methods ; Environmental Monitoring/methods ; }, abstract = {Mangrove forests are commonly mapped using spaceborne remote sensing data due to the challenges of field endeavors in such harsh environments. However, these methods usually require a substantial level of manual processing for each image. Hence, conservation practitioners prioritize using cloud computing platforms to obtain accurate canopy classifications of large extensions of mangrove forests. The objective of this study was to analyze the spatial distribution and rate of change (area gain and loss) of the red mangrove (Rhizophora mangle) and other dominant mangrove species, mainly Avicennia germinans and Laguncularia racemosa, between 2015 and 2020 throughout the northwestern coast of Mexico. Bimonthly data of the Combined Mangrove Recognition Index (CMRI) from all available Sentinel-2 data were processed with the Google Earth Engine cloud computing platform. The results indicated an extension of 42865 ha of red mangrove and 139602 ha of other dominant mangrove species in the Gulf of California and the Pacific northwestern coast of Mexico for 2020. The mangrove extension experienced a notable decline of 1817 ha from 2015 to 2020, largely attributed to the expansion of aquaculture ponds and the destructive effects of hurricanes. Considering the two mangrove classes, the overall classification accuracies were 90% and 92% for the 2015 and 2020 maps, respectively. The advantages of the method compared to supervised classifications and traditional vegetation indices are discussed, as are the disadvantages concerning the spatial resolution and the minimum detection area.
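The bimonthly Sentinel-2 CMRI workflow described above can be sketched with the Earth Engine Python API. CMRI is computed here as NDVI minus NDWI, a common formulation; the region, date window, and threshold are placeholders rather than the paper's values:

```python
# Hedged sketch of a bimonthly CMRI composite on Google Earth Engine.
# Assumes prior ee.Authenticate()/project setup; all parameters are
# illustrative, not those used in the published study.
import ee
ee.Initialize()

aoi = ee.Geometry.Rectangle([-110.6, 26.5, -110.2, 27.0])  # placeholder coast

def add_cmri(img):
    ndvi = img.normalizedDifference(['B8', 'B4'])   # (NIR-Red)/(NIR+Red)
    ndwi = img.normalizedDifference(['B3', 'B8'])   # (Green-NIR)/(Green+NIR)
    return img.addBands(ndvi.subtract(ndwi).rename('CMRI'))

col = (ee.ImageCollection('COPERNICUS/S2_SR')
       .filterBounds(aoi)
       .filterDate('2020-01-01', '2020-03-01')      # one bimonthly window
       .filter(ee.Filter.lt('CLOUDY_PIXEL_PERCENTAGE', 20))
       .map(add_cmri))

composite = col.select('CMRI').median().clip(aoi)
mangrove = composite.gt(0.2)   # illustrative threshold, not the paper's
print(mangrove.getInfo()['bands'][0]['id'])
```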
The work is a national effort to assist in decision-making to prioritize resource allocations for blue carbon, rehabilitation, and climate change mitigation programs.}, } @article {pmid39636866, year = {2024}, author = {Khan, M and Chao, W and Rahim, M and Amin, F}, title = {Enhancing green supplier selection: A nonlinear programming method with TOPSIS in cubic Pythagorean fuzzy contexts.}, journal = {PloS one}, volume = {19}, number = {12}, pages = {e0310956}, pmid = {39636866}, issn = {1932-6203}, mesh = {*Fuzzy Logic ; *Algorithms ; Nonlinear Dynamics ; Artificial Intelligence ; }, abstract = {The advancements in information and communication technologies have given rise to innovative developments such as cloud computing, the Internet of Things, big data analytics, and artificial intelligence. These technologies have been integrated into production systems, transforming them into intelligent systems and significantly impacting the supplier selection process. In recent years, the integration of these cutting-edge technologies with traditional and environmentally conscious criteria has gained considerable attention in supplier selection. This paper introduces a novel Nonlinear Programming (NLP) approach that utilizes the Technique for Order Preference by Similarity to Ideal Solution (TOPSIS) method to identify the most suitable green supplier within cubic Pythagorean fuzzy (CPF) environments. Unlike existing methods that use either interval-valued PFS (IVPFS) or Pythagorean fuzzy sets (PFS) to represent information, our approach employs cubic Pythagorean fuzzy sets (CPFS), effectively addressing both IVPFS and PFS simultaneously. The proposed NLP models leverage interval weights, relative closeness coefficients (RCC), and weighted distance measurements to tackle complex decision-making problems. To illustrate the accuracy and effectiveness of the proposed selection methodology, we present a real-world case study related to green supplier selection.}, } @article {pmid39635776, year = {2025}, author = {Corrêa Veríssimo, G and Salgado Ferreira, R and Gonçalves Maltarollo, V}, title = {Ultra-Large Virtual Screening: Definition, Recent Advances, and Challenges in Drug Design.}, journal = {Molecular informatics}, volume = {44}, number = {1}, pages = {e202400305}, doi = {10.1002/minf.202400305}, pmid = {39635776}, issn = {1868-1751}, mesh = {*Drug Design ; *Molecular Docking Simulation ; Machine Learning ; Software ; Humans ; Drug Discovery/methods ; Algorithms ; Drug Evaluation, Preclinical/methods ; }, abstract = {Virtual screening (VS) in drug design employs computational methodologies to systematically rank molecules from a virtual compound library based on predicted features related to their biological activities or chemical properties. The recent expansion in commercially accessible compound libraries and the advancements in artificial intelligence (AI) and computational power - including enhanced central processing units (CPUs), graphics processing units (GPUs), high-performance computing (HPC), and cloud computing - have significantly expanded our capacity to screen libraries containing over 10[9] molecules. Herein, we review the concept of ultra-large virtual screening (ULVS), focusing on the various algorithms and methodologies employed for virtual screening at this scale. 
In this context, we present the software utilized, applications, and results of different approaches, such as brute force docking, reaction-based docking approaches, machine learning (ML) strategies applied to docking or other VS methods, and similarity/pharmacophore search-based techniques. These examples represent a paradigm shift in the drug discovery process, demonstrating not only the feasibility of billion-scale compound screening but also their potential to identify hit candidates and increase the structural diversity of novel compounds with biological activities.}, } @article {pmid39632902, year = {2024}, author = {Prasad, VK and Verma, A and Bhattacharya, P and Shah, S and Chowdhury, S and Bhavsar, M and Aslam, S and Ashraf, N}, title = {Revolutionizing healthcare: a comparative insight into deep learning's role in medical imaging.}, journal = {Scientific reports}, volume = {14}, number = {1}, pages = {30273}, pmid = {39632902}, issn = {2045-2322}, mesh = {*Deep Learning ; Humans ; *Magnetic Resonance Imaging/methods ; Cloud Computing ; Neural Networks, Computer ; Alzheimer Disease/diagnostic imaging ; Diagnostic Imaging/methods ; Image Processing, Computer-Assisted/methods ; }, abstract = {Recently, Deep Learning (DL) models have shown promising accuracy in analysis of medical images. Alzheimer's Disease (AD), a prevalent form of dementia, is assessed using Magnetic Resonance Imaging (MRI) scans, which are then analysed via DL models. To address the model computational constraints, Cloud Computing (CC) is integrated to operate with the DL models. Recent articles on DL-based MRI have not discussed datasets specific to different diseases, which makes it difficult to build the specific DL model. Thus, the article systematically explores a tutorial approach, where we first discuss a classification taxonomy of medical imaging datasets. Next, we present a case-study on AD MRI classification using the DL methods. We analyse three distinct models, Convolutional Neural Networks (CNN), Visual Geometry Group 16 (VGG-16), and an ensemble approach, for classification and predictive outcomes. In addition, we designed a novel framework that offers insight into how various layers interact with the dataset. Our architecture comprises an input layer, a cloud-based layer responsible for preprocessing and model execution, and a diagnostic layer that issues alerts after successful classification and prediction. According to our simulations, CNN outperformed other models with a test accuracy of 99.285%, followed by VGG-16 with 85.113%, while the ensemble model lagged with a disappointing test accuracy of 79.192%. Our cloud computing framework serves as an efficient mechanism for medical image processing while safeguarding patient confidentiality and data privacy.}, } @article {pmid39628585, year = {2024}, author = {Tang, H and Kong, L and Fang, Z and Zhang, Z and Zhou, J and Chen, H and Sun, J and Zou, X}, title = {Sustainable and smart rail transit based on advanced self-powered sensing technology.}, journal = {iScience}, volume = {27}, number = {12}, pages = {111306}, pmid = {39628585}, issn = {2589-0042}, abstract = {As rail transit continues to develop, expanding railway networks increase the demand for sustainable energy supply and intelligent infrastructure management. In recent years, advanced rail self-powered technology has rapidly progressed toward artificial intelligence and the internet of things (AIoT).
This review primarily discusses the self-powered and self-sensing systems in rail transit, analyzing their current characteristics and innovative potentials in different scenarios. Based on this analysis, we further explore an IoT framework supported by sustainable self-powered sensing systems, including device nodes, network communication, and platform deployment. Additionally, cloud computing and edge computing technologies deployed in railway IoT enable more effective utilization. The deployed intelligent algorithms such as machine learning (ML) and deep learning (DL) can provide comprehensive monitoring, management, and maintenance in railway environments. Furthermore, this study explores research in other cross-disciplinary fields to investigate the potential of emerging technologies and analyze the trends for future development in rail transit.}, } @article {pmid39625991, year = {2024}, author = {Asim Shahid, M and Alam, MM and Mohd Su'ud, M}, title = {A fact based analysis of decision trees for improving reliability in cloud computing.}, journal = {PloS one}, volume = {19}, number = {12}, pages = {e0311089}, pmid = {39625991}, issn = {1932-6203}, mesh = {*Decision Trees ; *Cloud Computing ; *Algorithms ; *Bayes Theorem ; *Machine Learning ; Reproducibility of Results ; Deep Learning ; }, abstract = {The popularity of cloud computing (CC) has increased significantly in recent years due to its cost-effectiveness and simplified resource allocation. Owing to the exponential rise of cloud computing in the past decade, many corporations and businesses have moved to the cloud to ensure accessibility, scalability, and transparency. The proposed research involves comparing the accuracy and fault prediction of five machine learning algorithms: AdaBoostM1, Bagging, Decision Tree (J48), Deep Learning (Dl4jMLP), and Naive Bayes Tree (NB Tree). The results from secondary data analysis indicate that the Central Processing Unit (CPU)-Mem Multi classifier has the highest accuracy percentage and the least amount of fault prediction. This holds for the Decision Tree (J48) classifier with an accuracy rate of 89.71% for 80/20, 90.28% for 70/30, and 92.82% for 10-fold cross-validation. Additionally, the Hard Disk Drive (HDD)-Mono classifier has an accuracy rate of 90.35% for 80/20, 92.35% for 70/30, and 90.49% for 10-fold cross-validation. The AdaBoostM1 classifier was found to have the highest accuracy percentage and the least amount of fault prediction for the HDD Multi classifier with an accuracy rate of 93.63% for 80/20, 90.09% for 70/30, and 88.92% for 10-fold cross-validation. Finally, the CPU-Mem Mono classifier has an accuracy rate of 77.87% for 80/20, 77.01% for 70/30, and 77.06% for 10-fold cross-validation. Based on the primary data results, the Naive Bayes Tree (NB Tree) classifier is found to have the highest accuracy rate with less fault prediction: 97.05% for 80/20, 96.09% for 70/30, and 96.78% for 10-fold cross-validation. However, the algorithm complexity is not good, taking 1.01 seconds. On the other hand, the Decision Tree (J48) has the second-highest accuracy rate of 96.78%, 95.95%, and 96.78% for 80/20, 70/30, and 10-fold cross-validation, respectively. J48 also has less fault prediction but with a good algorithm complexity of 0.11 seconds. The difference in accuracy and fault prediction between NB Tree and J48 is only 0.9%, but the difference in runtime is 0.9 seconds. Based on the results, we have decided to make modifications to the Decision Tree (J48) algorithm.
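A compact scikit-learn analogue of the comparison described in this entry (Weka-style tree and ensemble classifiers scored by 10-fold cross-validation accuracy and timed), run on synthetic data:

```python
# Cross-validated accuracy and wall-clock comparison of classifiers
# similar to those named above (scikit-learn stand-ins, not Weka).
import time
from sklearn.datasets import make_classification
from sklearn.ensemble import AdaBoostClassifier, BaggingClassifier
from sklearn.model_selection import cross_val_score
from sklearn.naive_bayes import GaussianNB
from sklearn.tree import DecisionTreeClassifier

X, y = make_classification(n_samples=2000, n_features=20, random_state=0)

models = {
    "AdaBoost": AdaBoostClassifier(random_state=0),
    "Bagging": BaggingClassifier(random_state=0),
    "DecisionTree (J48-like)": DecisionTreeClassifier(random_state=0),
    "NaiveBayes": GaussianNB(),
}
for name, clf in models.items():
    t0 = time.perf_counter()
    acc = cross_val_score(clf, X, y, cv=10, scoring="accuracy").mean()
    print(f"{name:24s} acc={acc:.3f} time={time.perf_counter() - t0:.2f}s")
```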
This method has been proposed as it offers the highest accuracy and fewer fault prediction errors, with 97.05% accuracy for the 80/20 split, 96.42% for the 70/30 split, and 97.07% for the 10-fold cross-validation.}, } @article {pmid39625566, year = {2024}, author = {Hegde, A and Vijaysenan, D and Mandava, P and Menon, G}, title = {The use of cloud based machine learning to predict outcome in intracerebral haemorrhage without explicit programming expertise.}, journal = {Neurosurgical review}, volume = {47}, number = {1}, pages = {883}, pmid = {39625566}, issn = {1437-2320}, mesh = {Humans ; *Machine Learning ; *Cerebral Hemorrhage/diagnosis ; Male ; Female ; Aged ; Middle Aged ; Prognosis ; Aged, 80 and over ; Cloud Computing ; Logistic Models ; Glasgow Coma Scale ; ROC Curve ; Treatment Outcome ; }, abstract = {Machine Learning (ML) techniques require novel computer programming skills along with clinical domain knowledge to produce a useful model. We demonstrate the use of a cloud-based ML tool that does not require any programming expertise to develop, validate and deploy a prognostic model for Intracerebral Haemorrhage (ICH). The data of patients admitted with Spontaneous Intracerebral haemorrhage from January 2015 to December 2019 was accessed from our prospectively maintained hospital stroke registry. 80% of the dataset was used for training, 10% for validation, and 10% for testing. Seventeen input variables were used to predict the dichotomized outcomes (Good outcome mRS 0-3/ Bad outcome mRS 4-6), using machine learning (ML) and logistic regression (LR) models. The two different approaches were evaluated using Area Under the Curve (AUC) for Receiver Operating Characteristic (ROC), Precision recall and accuracy. Our data set comprised a cohort of 1000 patients. The data was split 8:1:1 for training, validation, and testing. The AUC ROC of the ML model was 0.86 with an accuracy of 75.7%. With LR, the AUC ROC was 0.74 with an accuracy of 73.8%. The feature importance chart showed that Glasgow coma score (GCS) at presentation had the highest relative importance, followed by hematoma volume and age in both approaches. Machine learning models perform better when compared to logistic regression. Models can be developed by clinicians possessing domain expertise and no programming experience using cloud-based tools. The models so developed lend themselves to incorporation into clinical workflow.}, } @article {pmid39622969, year = {2024}, author = {Bhakhar, R and Chhillar, RS}, title = {Dynamic multi-criteria scheduling algorithm for smart home tasks in fog-cloud IoT systems.}, journal = {Scientific reports}, volume = {14}, number = {1}, pages = {29957}, pmid = {39622969}, issn = {2045-2322}, abstract = {The proliferation of Internet of Things (IoT) devices in smart homes has created a demand for efficient computational task management across complex networks. This paper introduces the Dynamic Multi-Criteria Scheduling (DMCS) algorithm, designed to enhance task scheduling in fog-cloud computing environments for smart home applications. DMCS dynamically allocates tasks based on criteria such as computational complexity, urgency, and data size, ensuring that time-sensitive tasks are processed swiftly on fog nodes while resource-intensive computations are handled by cloud data centers. The implementation of DMCS demonstrates significant improvements over conventional scheduling algorithms, reducing makespan, operational costs, and energy consumption.
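The multi-criteria dispatch idea behind DMCS can be illustrated with a toy rule that estimates finish times on fog and cloud nodes and routes each task accordingly; all constants and the scoring rule are assumptions, not the published algorithm:

```python
# Toy multi-criteria fog/cloud dispatch: urgent, light tasks tend to
# stay on the fog node, heavy ones go to the cloud (illustrative only).
from dataclasses import dataclass

@dataclass
class Task:
    name: str
    cycles: float      # computational demand (GHz-seconds)
    deadline_s: float  # urgency
    data_mb: float     # payload that must cross the WAN for cloud runs

FOG_GHZ, CLOUD_GHZ = 2.0, 16.0
FOG_RTT, CLOUD_RTT = 0.005, 0.2        # network latency in seconds
WAN_MBPS = 100.0

def finish_time(t: Task, node: str) -> float:
    if node == "fog":
        return FOG_RTT + t.cycles / FOG_GHZ
    return CLOUD_RTT + t.data_mb * 8 / WAN_MBPS + t.cycles / CLOUD_GHZ

def dispatch(t: Task) -> str:
    # Prefer the faster finish among nodes that meet the deadline.
    options = sorted(["fog", "cloud"], key=lambda n: finish_time(t, n))
    for node in options:
        if finish_time(t, node) <= t.deadline_s:
            return node
    return options[0]  # best effort if no node can meet the deadline

tasks = [Task("thermostat", 0.2, 0.2, 0.01), Task("video-analytics", 40.0, 5.0, 50.0)]
for t in tasks:
    node = dispatch(t)
    print(t.name, "->", node, f"eta={finish_time(t, node):.3f}s")
```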
By effectively balancing immediate and delayed task execution, DMCS enhances system responsiveness and overall computational efficiency in smart home environments. However, DMCS also faces limitations, including computational overhead and scalability issues in larger networks. Future research will focus on integrating advanced machine learning algorithms to refine task classification, enhancing security measures, and expanding the framework's applicability to various computing environments. Ultimately, DMCS aims to provide a robust and adaptive scheduling solution capable of meeting the complex requirements of modern IoT ecosystems and improving the efficiency of smart homes.}, } @article {pmid39619197, year = {2024}, author = {Wang, H and Kong, X and Phewnil, O and Luo, J and Li, P and Chen, X and Xie, T}, title = {Spatiotemporal prediction of alpine wetlands under multi-climate scenarios in the west of Sichuan, China.}, journal = {PeerJ}, volume = {12}, number = {}, pages = {e18586}, pmid = {39619197}, issn = {2167-8359}, mesh = {*Wetlands ; China ; *Climate Change ; Spatio-Temporal Analysis ; Forecasting/methods ; }, abstract = {BACKGROUND: The alpine wetlands in western Sichuan are distributed along the eastern section of the Qinghai-Tibet Plateau (QTP), where the ecological environment is fragile and highly sensitive to global climate change. These wetlands are already experiencing severe ecological and environmental issues, such as drought, retrogressive succession, and desertification. However, due to the limitations of computational models, previous studies have been unable to adequately understand the spatiotemporal change trends of these alpine wetlands.

METHODS: We employed a large sample and composite supervised classification algorithms to classify alpine wetlands and generate wetland maps, based on the Google Earth Engine cloud computing platform. The thematic maps were then grid-sampled for predictive modeling of future wetland changes. Four species distribution models (SDMs), BIOCLIM, DOMAIN, MAXENT, and GARP, were innovatively introduced. Using the WorldClim dataset as environmental variables, we predicted the future distribution of wetlands in western Sichuan under multiple climate scenarios.
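As a rough stand-in for the SDM step, the sketch below fits presence/absence records against two bioclimatic predictors and produces a habitat suitability index; logistic regression is used as a simple proxy for MAXENT-style modeling, and the data are synthetic:

```python
# Fitting presence/absence against bioclim-style predictors and scoring
# habitat suitability (synthetic data; a proxy for MAXENT-like SDMs).
import numpy as np
from sklearn.linear_model import LogisticRegression

rng = np.random.default_rng(42)
n = 500
bio01 = rng.normal(5, 3, n)        # annual mean temperature (synthetic)
bio12 = rng.normal(700, 200, n)    # annual precipitation (synthetic)
X = np.column_stack([bio01, bio12])
# Synthetic truth: wetlands favor cooler, wetter cells.
p = 1 / (1 + np.exp(0.8 * (bio01 - 5) - 0.005 * (bio12 - 700)))
y = rng.binomial(1, p)

sdm = LogisticRegression().fit(X, y)
suitability = sdm.predict_proba(X)[:, 1]   # suitability index in [0, 1]
print("mean suitability of presences:", suitability[y == 1].mean().round(2))
print("mean suitability of absences:", suitability[y == 0].mean().round(2))
```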

RESULTS: The Kappa coefficients for Landsat 8 and Sentinel 2 were 0.89 and 0.91, respectively. Among the four SDMs, MAXENT achieved a higher accuracy (α = 91.6%) for the actual wetland compared to the thematic overlay analysis. The area under the curve (AUC) values of the MAXENT model simulations for wetland spatial distribution were all greater than 0.80. This suggests that incorporating the SDM model into land change simulations has high generalizability and significant advantages on a large scale. Furthermore, simulation results reveal that between 2021 and 2100, with increasing emission concentrations, highly suitable areas for wetland development exhibit significant spatial differentiation. In particular, wetland areas in high-altitude regions are expected to increase, while low-altitude regions will markedly shrink. The changes in the future spatial distribution of wetlands show a high level of consistency with historical climate changes, with warming being the main driving force behind the spatiotemporal changes in alpine wetlands in western Sichuan, especially evident in the central high-altitude and northern low-altitude areas.}, } @article {pmid39616207, year = {2024}, author = {Wu, SH and Mueller, TA}, title = {A user-friendly NoSQL framework for managing agricultural field trial data.}, journal = {Scientific reports}, volume = {14}, number = {1}, pages = {29819}, pmid = {39616207}, issn = {2045-2322}, support = {MOST 111-2313-B-002-007-MY3//Ministry of Science and Technology, Taiwan/ ; }, mesh = {*Agriculture/methods ; Databases, Factual ; User-Computer Interface ; Software ; Database Management Systems ; Humans ; Internet ; }, abstract = {Field trials are one of the essential stages in agricultural product development, enabling the validation of products in real-world environments rather than controlled laboratory or greenhouse settings. With the advancement in technologies, field trials often collect a large amount of information with diverse data types from various sources. Managing and organizing extensive datasets can impose challenges for small research teams, especially with constantly evolving data collection processes, multiple collaborators, and new data types introduced between studies. A practical database needs to be able to incorporate all these changes seamlessly. We present DynamoField, a flexible database framework for collecting and analyzing field trial data. The backend database for DynamoField is powered by Amazon Web Services DynamoDB, a NoSQL database, and DynamoField also provides a front-end interactive web interface. With the flexibility of the NoSQL database, researchers can modify the database schema based on the data provided by various collaborators and contract research organizations. This framework includes functions for non-technical users, including importing and exporting data, data integration and manipulation, and performing statistical analysis. Researchers can utilize cloud computing to establish a secure NoSQL database with minimal maintenance; this also enables collaboration with others worldwide and allows the database to adapt to different data-collecting strategies as research progresses.
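The schema flexibility described here is the core NoSQL property DynamoField leans on. A minimal boto3 sketch (table name, keys, and fields are hypothetical, not DynamoField's actual schema; valid AWS credentials and an existing table are assumed):

```python
# Items in one DynamoDB table can carry different attributes, so new
# collaborators' fields need no schema migration (hypothetical names).
import boto3
from boto3.dynamodb.conditions import Key

dynamodb = boto3.resource("dynamodb", region_name="us-east-1")
table = dynamodb.Table("field_trials")  # assumed key pair: (trial_id, plot)

# Two observations from different collaborators with different columns.
table.put_item(Item={"trial_id": "T1", "plot": "A1", "yield_kg": 512})
table.put_item(Item={"trial_id": "T1", "plot": "A2",
                     "ndvi": "0.71", "drone_image": "s3://bucket/a2.tif"})

# Retrieve everything recorded for trial T1, whatever its shape.
resp = table.query(KeyConditionExpression=Key("trial_id").eq("T1"))
for item in resp["Items"]:
    print(item)   # each item keeps only the attributes it was given
```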
DynamoField is implemented in Python and is publicly available at https://github.com/ComputationalAgronomy/DynamoField .}, } @article {pmid39607101, year = {2024}, author = {Hillebrand, FL and Prieto, JD and Mendes Júnior, CW and Arigony-Neto, J and Simões, JC}, title = {Gray Level Co-occurrence Matrix textural analysis for temporal mapping of sea ice in Sentinel-1A SAR images.}, journal = {Anais da Academia Brasileira de Ciencias}, volume = {96}, number = {suppl 2}, pages = {e20240554}, doi = {10.1590/0001-3765202420240554}, pmid = {39607101}, issn = {1678-2690}, mesh = {*Ice Cover ; *Radar ; Environmental Monitoring/methods ; Climate Change ; Seasons ; }, abstract = {Sea ice is a critical component of the cryosphere and plays a role in the heat and moisture exchange processes between the ocean and atmosphere, thus regulating the global climate. With climate change, detailed monitoring of changes occurring in sea ice is necessary. Therefore, an analysis was conducted to evaluate the potential of Gray Level Co-occurrence Matrix (GLCM) texture analysis combined with the backscattering coefficient (σ°) of HH polarization in Sentinel-1A Synthetic Aperture Radar (SAR) images, acquired in interferometric imaging mode, for mapping sea ice in time series. Data processing was performed using cloud computing on the Google Earth Engine platform with routines written in JavaScript. To train the Random Forest (RF) classifier, samples of regions with open water and sea ice were obtained through visual interpretation of false-color SAR images from Sentinel-1B in the extra-wide swath imaging mode. The analysis demonstrated that training samples used in the RF classifier from a specific date can be applied to images from other dates within the freezing period, achieving accuracies ≥ 90% when using 64-level grayscale quantization in GLCM combined with σ° data. However, when using only σ° data in the RF classifier, accuracies ≥ 93% were observed.}, } @article {pmid39606346, year = {2024}, author = {Ricotta, EE and Bents, S and Lawler, B and Smith, BA and Majumder, MS}, title = {Search interest in alleged COVID-19 treatments over the pandemic period: the impact of mass news media.}, journal = {medRxiv : the preprint server for health sciences}, volume = {}, number = {}, pages = {}, pmid = {39606346}, support = {R35 GM146974/GM/NIGMS NIH HHS/United States ; }, abstract = {BACKGROUND: Understanding how individuals obtain medical information, especially amid changing guidance, is important for improving outreach and communication strategies. In particular, during a public health emergency, interest in unsafe or illegitimate medications can delay access to appropriate treatments and foster mistrust in the medical system, which can be detrimental at both individual and population levels. It is thus key to understand the factors associated with such interest.

METHODS: We obtained US-based Google Search Trends and Media Cloud data from 2019-2022 to assess the relationship between Internet search interest and media coverage of three purported COVID-19 treatments: hydroxychloroquine, ivermectin, and remdesivir. We first conducted anomaly detection on the treatment-specific search interest data to detect periods of interest above the pre-pandemic baseline; we then used multilevel negative binomial regression, controlling for political leaning, rurality, and social vulnerability, to test for associations between treatment-specific search interest and media coverage.
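
A simplified, single-level sketch of such a negative binomial model in Python with statsmodels (the published analysis was multilevel, and the file and column names here are illustrative):

    import pandas as pd
    import statsmodels.api as sm
    import statsmodels.formula.api as smf

    # Hypothetical panel of treatment-specific search interest and coverage.
    df = pd.read_csv('search_media_panel.csv')

    # Standardize media coverage so the coefficient is per one SD.
    df['media_z'] = ((df['media_coverage'] - df['media_coverage'].mean())
                     / df['media_coverage'].std())

    model = smf.glm(
        'search_interest ~ media_z + political_leaning + rurality'
        ' + social_vulnerability',
        data=df,
        family=sm.families.NegativeBinomial(),
    ).fit()
    print(model.summary())
    # exp(coefficient on media_z) is the multiplicative change in expected
    # search interest per one-SD increase in media coverage.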

FINDINGS: We observed that interest in hydroxychloroquine and remdesivir peaked early in 2020 and then subsided, while peak interest in ivermectin occurred later but was more sustained. We detected significant associations between media coverage and search interest for all three treatments. The strongest association was observed for ivermectin, for which a one-standard-deviation increase in media coverage was associated with more than double the search interest (164%, 95% CI: 148, 180), compared to a 109% increase (95% CI: 101, 118) for hydroxychloroquine and a 49% increase (95% CI: 43, 55) for remdesivir.
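
Reading these percentages as rate ratios from a log-link count model, an increase of p% per one-standard-deviation rise in coverage corresponds to RR = 1 + p/100 = e^β, so RR ≈ 2.64 for ivermectin (β ≈ 0.97), 2.09 for hydroxychloroquine (β ≈ 0.74), and 1.49 for remdesivir (β ≈ 0.40).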

INTERPRETATION: Search interest in purported COVID-19 treatments was significantly associated with contemporaneous media coverage, with the highest impact on interest in ivermectin, a treatment demonstrated to be ineffectual for treating COVID-19 and potentially dangerous if used inappropriately.

FUNDING: This work was funded in part by the US National Institutes of Health and the US National Science Foundation.}, } @article {pmid39605934, year = {2024}, author = {G C, S and Koparan, C and Upadhyay, A and Ahmed, MR and Zhang, Y and Howatt, K and Sun, X}, title = {A novel automated cloud-based image datasets for high throughput phenotyping in weed classification.}, journal = {Data in brief}, volume = {57}, number = {}, pages = {111097}, pmid = {39605934}, issn = {2352-3409}, abstract = {Deep learning-based weed detection data management involves data acquisition, data labeling, model development, and model evaluation phases. Of these data management phases, data acquisition and data labeling are labor-intensive and time-consuming steps for building robust models. In addition, low temporal variation of crop and weed in the datasets is one of the limiting factors for effective weed detection model development. This article describes the cloud-based automatic data acquisition system (CADAS), which captures weed and crop images at fixed time intervals so that plant growth stages are taken into account for weed identification. The CADAS was developed by integrating fifteen digital cameras in the visible spectrum with gphoto2 libraries, external storage, cloud storage, and a computer running a Linux operating system. The dataset from the CADAS contains six weed species and eight crop species for weed and crop detection. A dataset of 2000 images per weed and crop species was publicly released. Raw RGB images underwent a cropping process guided by bounding box annotations to generate individual JPG images for crop and weed instances. In addition to the cropped images, 200 raw images with label files were released publicly. This dataset holds potential for investigating challenges in deep learning-based weed and crop detection in agricultural settings. Additionally, these data could be used by researchers along with field data to boost model performance by reducing the data imbalance problem.}, } @article {pmid39605518, year = {2024}, author = {Geng, J and Voitiuk, K and Parks, DF and Robbins, A and Spaeth, A and Sevetson, JL and Hernandez, S and Schweiger, HE and Andrews, JP and Seiler, ST and Elliott, MAT and Chang, EF and Nowakowski, TJ and Currie, R and Mostajo-Radji, MA and Haussler, D and Sharf, T and Salama, SR and Teodorescu, M}, title = {Multiscale Cloud-Based Pipeline for Neuronal Electrophysiology Analysis and Visualization.}, journal = {bioRxiv : the preprint server for biology}, volume = {}, number = {}, pages = {}, pmid = {39605518}, issn = {2692-8205}, support = {K12 GM139185/GM/NIGMS NIH HHS/United States ; R01 MH120295/MH/NIMH NIH HHS/United States ; RM1 HG011543/HG/NHGRI NIH HHS/United States ; U24 MH132628/MH/NIMH NIH HHS/United States ; }, abstract = {Electrophysiology offers a high-resolution method for real-time measurement of neural activity. Longitudinal recordings from high-density microelectrode arrays (HD-MEAs) can be of considerable size for local storage and of substantial complexity for extracting neural features and network dynamics. Analysis is often demanding due to the need for multiple software tools with different runtime dependencies. To address these challenges, we developed an open-source cloud-based pipeline to store, analyze, and visualize neuronal electrophysiology recordings from HD-MEAs. This pipeline is dependency-agnostic, utilizing cloud storage, cloud computing resources, and an Internet of Things messaging protocol.
We containerized the services and algorithms to serve as scalable and flexible building blocks within the pipeline. In this paper, we applied the pipeline to two types of cultures, cortical organoids and ex vivo brain slices, to show that it simplifies the data analysis process and facilitates understanding of neuronal activity.}, } @article {pmid39605506, year = {2024}, author = {Papudeshi, B and Roach, MJ and Mallawaarachchi, V and Bouras, G and Grigson, SR and Giles, SK and Harker, CM and Hutton, ALK and Tarasenko, A and Inglis, LK and Vega, AA and Souza, C and Boling, L and Hajama, H and Cobián Güemes, AG and Segall, AM and Dinsdale, EA and Edwards, RA}, title = {Sphae: An automated toolkit for predicting phage therapy candidates from sequencing data.}, journal = {bioRxiv : the preprint server for biology}, volume = {}, number = {}, pages = {}, pmid = {39605506}, issn = {2692-8205}, support = {RC2 DK116713/DK/NIDDK NIH HHS/United States ; }, abstract = {MOTIVATION: Phage therapy is a viable alternative for treating bacterial infections amidst the escalating threat of antimicrobial resistance. However, the therapeutic success of phage therapy depends on selecting safe and effective phage candidates. While experimental methods focus on isolating phages and determining their lifecycle and host range, comprehensive genomic screening is critical to identify markers that indicate potential risks, such as toxins, antimicrobial resistance, or temperate lifecycle traits. These analyses are often labor-intensive and time-consuming, limiting the rapid deployment of phages in clinical settings.

RESULTS: We developed Sphae, an automated bioinformatics pipeline designed to streamline assessment of a phage's therapeutic potential in under ten minutes. Using the Snakemake workflow manager, Sphae integrates tools for quality control, assembly, genome assessment, and annotation tailored specifically for phage biology. Sphae automates the detection of key genomic markers, including virulence factors, antimicrobial resistance genes, and lysogeny indicators such as integrase, recombinase, and transposase, which could preclude therapeutic use. Benchmarked on 65 phage sequences, 28 phage samples showed therapeutic potential, 8 failed during assembly due to low sequencing depth, 22 samples included prophage or virulence markers, and the remaining 23 samples included multiple phage genomes per sample. This workflow outputs a comprehensive report, enabling rapid assessment of phage safety and suitability for phage therapy under these criteria. Sphae is scalable and portable, facilitating efficient deployment across most high-performance computing (HPC) and cloud platforms and expediting the genomic evaluation process.
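
A toy illustration of the kind of genomic-marker screening Sphae automates; this is not Sphae's code, and the marker lists are illustrative only:

    # Screen phage gene annotations for markers that preclude therapy.
    LYSOGENY_MARKERS = {'integrase', 'recombinase', 'transposase'}
    RISK_MARKERS = {'toxin', 'antimicrobial resistance'}

    def assess_phage(annotations):
        """Return a verdict given lowercase gene product annotations."""
        flags = sorted(
            a for a in annotations
            if any(m in a for m in LYSOGENY_MARKERS | RISK_MARKERS))
        return ('needs review', flags) if flags else ('candidate', [])

    print(assess_phage(['major capsid protein', 'tyrosine integrase']))
    # -> ('needs review', ['tyrosine integrase'])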

AVAILABILITY: Sphae is open source and freely available at https://github.com/linsalrob/sphae, with installation supported via Conda, PyPI, and Docker containers.}, } @article {pmid39604739, year = {2024}, author = {Hasan, R and Kapoor, A and Singh, R and Yadav, BK}, title = {A state-of-the-art review on the quantitative and qualitative assessment of water resources using google earth engine.}, journal = {Environmental monitoring and assessment}, volume = {196}, number = {12}, pages = {1266}, pmid = {39604739}, issn = {1573-2959}, support = {RJF/2022/00110//Science and Engineering Research Board/ ; RJF/2022/00110//Science and Engineering Research Board/ ; NHP-1678-HYD//National Hydrological Project/ ; NHP-1678-HYD//National Hydrological Project/ ; }, mesh = {*Environmental Monitoring/methods ; *Water Resources ; *Geographic Information Systems ; Water Quality ; Nevada ; Remote Sensing Technology ; Water Supply ; Lakes/chemistry ; Conservation of Water Resources/methods ; }, abstract = {Water resource management is becoming essential due to many anthropogenic and climatic factors resulting in dwindling water resources. Traditionally, geographic information systems (GIS) and remote sensing (RS) have long been instrumental in water resource assessment and management, as satellites or airborne units are periodically utilized to collect data over large areal extents. However, these platforms have limited computational capability and localized storage systems. Recently, these limitations have been overcome by the application of Google Earth Engine (GEE), a faster and more reliable cloud-based GIS and remote sensing platform that leverages parallel processing capabilities. As a result, GEE has in recent years witnessed rapid adoption and usage in a wide variety of domains, including water resource monitoring, assessment, and management. However, no systematic study has reviewed the application of GEE in water resource management. This review article is a first attempt at developing an understanding of the functioning of GEE and its application in water resource assessment, covering both water quantity and water quality. The review further illustrates GEE's capabilities in real-world use through a case study analyzing the water quality and quantity of Lake Mead, the reservoir of the Hoover Dam, Nevada (USA), at a monthly scale for the 3-year period from 2021 to 2023. The results of this case study showcase the applicability of GEE to water resource quantity and quality monitoring, assessment, and management problems. The review further discusses the existing challenges with the application of GEE in water resource assessment and the scope for further improvement.
In conclusion, once these challenges are addressed, GEE holds substantial potential for the management and planning of water resources in the face of forthcoming challenges.}, } @article {pmid39599167, year = {2024}, author = {Jia, L and Sun, B and Tan, W and Zhang, S and Zhang, B and Zhu, J}, title = {Special Issue: Artificial Intelligence and Smart Sensor-Based Industrial Advanced Technology.}, journal = {Sensors (Basel, Switzerland)}, volume = {24}, number = {22}, pages = {}, pmid = {39599167}, issn = {1424-8220}, abstract = {With the rapid growth of smart sensors and industrial data, artificial intelligence (AI) technology (such as machine learning, machine vision, multi-sensor fusion, cloud computing, edge computing, digital twins, etc [...].}, } @article {pmid39598999, year = {2024}, author = {Yang, Z and Wang, M and Xie, S}, title = {A Comprehensive Framework for Transportation Infrastructure Digitalization: TJYRoad-Net for Enhanced Point Cloud Segmentation.}, journal = {Sensors (Basel, Switzerland)}, volume = {24}, number = {22}, pages = {}, pmid = {39598999}, issn = {1424-8220}, support = {2022YFC3002601//the National Key Research and Development Plan of China, Grant/Award/ ; 2023-4-ZD-05//the Fundamental Research Funds for the Central Universities/ ; }, abstract = {This research introduces a cutting-edge approach to traffic infrastructure digitization, integrating UAV oblique photography with LiDAR point clouds for high-precision, lightweight 3D road modeling. The proposed method addresses the challenge of accurately capturing the current state of infrastructure while minimizing redundancy and optimizing computational efficiency. A key innovation is the development of the TJYRoad-Net model, which achieves over 85% mIoU segmentation accuracy by including a traffic feature computing (TFC) module composed of three critical components: the Regional Coordinate Encoder (RCE), the Context-Aware Aggregation Unit (CAU), and the Hierarchical Expansion Block. Comparative analysis segments the point clouds into road and non-road categories, achieving centimeter-level registration accuracy with RANSAC and ICP. Two lightweight surface reconstruction techniques are implemented: (1) algorithmic reconstruction, which delivers a 6.3 mm elevation error at 95% confidence in complex intersections, and (2) template matching, which replaces road markings, poles, and vegetation using bounding boxes. These methods ensure accurate results with minimal memory overhead. The optimized 3D models have been successfully applied in driving simulation and traffic flow analysis, providing a practical and scalable solution for real-world infrastructure modeling and analysis. These applications demonstrate the versatility and efficiency of the proposed methods in modern traffic system simulations.}, } @article {pmid39595470, year = {2024}, author = {Shefa, FR and Sifat, FH and Uddin, J and Ahmad, Z and Kim, JM and Kibria, MG}, title = {Deep Learning and IoT-Based Ankle-Foot Orthosis for Enhanced Gait Optimization.}, journal = {Healthcare (Basel, Switzerland)}, volume = {12}, number = {22}, pages = {}, pmid = {39595470}, issn = {2227-9032}, support = {2021RIS-003//National Research Foundation of Korea/ ; }, abstract = {BACKGROUND/OBJECTIVES: This paper proposes a method for managing gait imbalances by integrating the Internet of Things (IoT) and machine learning technologies.
Ankle-foot orthosis (AFO) devices are crucial medical braces that align the lower leg, ankle, and foot, offering essential support for individuals with gait imbalances by assisting weak or paralyzed muscles. This research aims to revolutionize medical orthotics through IoT and machine learning, providing a sophisticated solution for managing gait issues and enhancing patient care with personalized, data-driven insights.

METHODS: The smart ankle-foot orthosis (AFO) is equipped with a surface electromyography (sEMG) sensor to measure muscle activity and an Inertial Measurement Unit (IMU) sensor to monitor gait movements. Data from these sensors are transmitted to the cloud via fog computing for analysis, aiming to identify distinct walking phases, whether normal or aberrant. This involves preprocessing the data and analyzing it using various machine learning methods, such as Random Forest, Decision Tree, Support Vector Machine (SVM), Artificial Neural Network (ANN), Long Short-Term Memory (LSTM), and Transformer models.
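
A minimal sketch of the classification step on windowed sensor features, using one of the compared model families (a random forest via scikit-learn); the feature dimensions and labels are synthetic stand-ins for the study's sEMG/IMU features:

    import numpy as np
    from sklearn.ensemble import RandomForestClassifier
    from sklearn.model_selection import train_test_split

    rng = np.random.default_rng(0)
    # 1000 windows x 12 features (e.g., sEMG RMS, IMU accel/gyro statistics).
    X = rng.normal(size=(1000, 12))
    y = rng.integers(0, 4, size=1000)  # illustrative walking-phase labels

    X_tr, X_te, y_tr, y_te = train_test_split(
        X, y, test_size=0.2, random_state=0)
    clf = RandomForestClassifier(n_estimators=200, random_state=0)
    clf.fit(X_tr, y_tr)
    print('held-out accuracy:', clf.score(X_te, y_te))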

RESULTS: The Transformer model demonstrates exceptional performance in classifying walking phases based on sensor data, achieving an accuracy of 98.97%. With this preprocessed data, the model can accurately predict and measure improvements in patients' walking patterns, highlighting its effectiveness in distinguishing between normal and aberrant phases during gait analysis.

CONCLUSIONS: These predictive capabilities enable tailored recommendations regarding the duration and intensity of ankle-foot orthosis (AFO) usage based on individual recovery needs. The analysis results are sent to the physician's device for validation and regular monitoring. Upon approval, the comprehensive report is made accessible to the patient, ensuring continuous progress tracking and timely adjustments to the treatment plan.}, } @article {pmid39592689, year = {2024}, author = {Beňo, L and Kučera, E and Drahoš, P and Pribiš, R}, title = {Transforming industrial automation: voice recognition control via containerized PLC device.}, journal = {Scientific reports}, volume = {14}, number = {1}, pages = {29387}, pmid = {39592689}, issn = {2045-2322}, support = {1/0107/22//Scientific Grant Agency of the Ministry of Education, Research, Development and Youth of the Slovak Republic/ ; 1/0107/22//Scientific Grant Agency of the Ministry of Education, Research, Development and Youth of the Slovak Republic/ ; 1/0107/22//Scientific Grant Agency of the Ministry of Education, Research, Development and Youth of the Slovak Republic/ ; 1/0107/22//Scientific Grant Agency of the Ministry of Education, Research, Development and Youth of the Slovak Republic/ ; APVV-21-0125 and APVV-23-0123//Agentúra na Podporu Výskumu a Vývoja/ ; APVV-21-0125 and APVV-23-0123//Agentúra na Podporu Výskumu a Vývoja/ ; APVV-21-0125 and APVV-23-0123//Agentúra na Podporu Výskumu a Vývoja/ ; APVV-21-0125 and APVV-23-0123//Agentúra na Podporu Výskumu a Vývoja/ ; KEGA 021STU-4-2024//Cultural and Educational Grant Agency of the Ministry of Education, Research, Development and Youth of the Slovak Republic/ ; KEGA 021STU-4-2024//Cultural and Educational Grant Agency of the Ministry of Education, Research, Development and Youth of the Slovak Republic/ ; KEGA 021STU-4-2024//Cultural and Educational Grant Agency of the Ministry of Education, Research, Development and Youth of the Slovak Republic/ ; KEGA 021STU-4-2024//Cultural and Educational Grant Agency of the Ministry of Education, Research, Development and Youth of the Slovak Republic/ ; }, abstract = {The article discusses the impact of voice recognition and containerization technologies in the industrial sector, particularly on Programmable Logic Controller (PLC) devices. It highlights how voice assistants like Alexa, Siri, Cortana, and Google Assistant are shaping the future of human-machine interfaces, with applications moving from smart homes to industrial automation. Containerization, illustrated by Docker, is transforming software deployment practices, offering benefits such as enhanced portability, modular architecture, and improved security when applied to industrial PLCs. The article introduces a novel approach to enhancing human-machine interfaces (HMIs) within industrial applications, leveraging voice recognition and containerization technologies on Programmable Logic Controllers (PLCs). Unlike traditional systems, this work integrates a voice assistant with industrial PLCs through a containerized IoT architecture. This innovative framework enables efficient deployment on edge devices, supporting modular, portable, and secure operations aligned with Industry 4.0 and 5.0 paradigms.
The study further includes a detailed implementation on microcontrollers and industrial PLCs, validating its application in a controlled laboratory environment and a virtual model.}, } @article {pmid39576796, year = {2024}, author = {K Karim, F and Ghorashi, S and Alkhalaf, S and H A Hamza, S and Ben Ishak, A and Abdel-Khalek, S}, title = {Optimizing makespan and resource utilization in cloud computing environment via evolutionary scheduling approach.}, journal = {PloS one}, volume = {19}, number = {11}, pages = {e0311814}, pmid = {39576796}, issn = {1932-6203}, mesh = {*Cloud Computing ; *Algorithms ; }, abstract = {As a new computing resource distribution platform, cloud technology has greatly influenced society with the conception of on-demand resource usage through virtualization. Virtualization allows physical resources to be used in a way that enables multiple end users to share similar hardware infrastructure. In the cloud, many challenges exist on the provider side due to the expectations of clients. Resource scheduling (RS) is the most significant nondeterministic polynomial time (NP)-hard problem in the cloud, owing to its crucial impact on cloud performance. Previous research found that metaheuristics can dramatically increase cloud computing performance when deployed as scheduling algorithms. Therefore, this study develops an evolutionary algorithm-based scheduling approach for makespan optimization and resource utilization (EASA-MORU) in the cloud environment. The EASA-MORU technique aims to minimize the makespan and effectively use the resources in the cloud infrastructure. In the EASA-MORU technique, the dung beetle optimization (DBO) technique is used for scheduling purposes. Moreover, the EASA-MORU technique balances the load properly and distributes the resources based on the demands of the cloud infrastructure. The performance of the EASA-MORU method is evaluated using a series of performance measures. A wide range of comprehensive comparison studies showed that the EASA-MORU technique performs better than other methods across different evaluation measures.}, } @article {pmid39576484, year = {2025}, author = {Goh, C and Puah, M and Toh, ZH and Boon, J and Boey, D and Tay, R and Sule, AA and Liu, R and Ong, XE and Kalra, A and Gupta, S and Rousselot, A and Rojas-Carabali, W and Ang, B and Agrawal, R}, title = {Mobile Apps and Visual Function Assessment: A Comprehensive Review of the Latest Advancements.}, journal = {Ophthalmology and therapy}, volume = {14}, number = {1}, pages = {23-39}, pmid = {39576484}, issn = {2193-8245}, abstract = {INTRODUCTION: With technological advancements and the growing prevalence of smartphones, ophthalmology has opportunely harnessed medical technology for visual function assessment as a home monitoring tool for patients. Ophthalmology applications offering such assessments have likewise become more readily available in recent years and may be used for early detection and monitoring of eye conditions. To date, no review has been done to evaluate and compare the utility of these apps. This review provides an updated overview of visual function assessment using mobile applications available on the Apple App and Google Play Stores, enabling eye care professionals to make informed selections of their use in ophthalmology.

METHODS: We reviewed 160 visual function applications available on the Apple App and Google Play Stores. The parameters surveyed included the types of visual function tests, the involvement of healthcare professionals in their development, cost, and download count.

RESULTS: Visual tests, including visual acuity and color vision tests, were most common among the apps surveyed, and they were comparable to traditional clinical methods. Certain applications were more widely used, some of which have had studies conducted to assess the reliability of their test results. Limitations of these apps include the absence of healthcare professionals' involvement in their development, the lack of approval by regulatory authorities, and minimal cloud-based features to communicate results to healthcare professionals.

CONCLUSIONS: The prevalence and easy access of visual function testing applications present opportunities to enhance teleophthalmology through early detection and monitoring of eye conditions. Future development to enhance the quality of the apps should involve regulatory bodies and medical professionals, followed by research using larger samples and longer follow-up studies to review the reliability and validity of ophthalmology applications. This would potentially enable these applications to be incorporated into the comprehensive assessment and follow-up care of patients' eye health.}, } @article {pmid39568539, year = {2024}, author = {Cao, M and Ramezani, R and Katakwar, VK and Zhang, W and Boda, D and Wani, M and Naeim, A}, title = {Developing remote patient monitoring infrastructure using commercially available cloud platforms.}, journal = {Frontiers in digital health}, volume = {6}, number = {}, pages = {1399461}, pmid = {39568539}, issn = {2673-253X}, abstract = {Wearable sensor devices for continuous patient monitoring produce a large volume of data, necessitating scalable infrastructures for efficient data processing, management and security, especially concerning Patient Health Information (PHI). Adherence to the Health Insurance Portability and Accountability Act (HIPAA), legislation that mandates that developers and healthcare providers uphold a set of standards for safeguarding patients' health information and privacy, further complicates the development of remote patient monitoring within healthcare ecosystems. This paper presents an Internet of Things (IoT) architecture designed for the healthcare sector, utilizing commercial cloud platforms like Microsoft Azure and Amazon Web Services (AWS) to develop HIPAA-compliant health monitoring systems. By leveraging cloud functionalities such as scalability, security, and load balancing, the architecture simplifies the creation of infrastructures adhering to HIPAA standards. The study includes a cost analysis of Azure and AWS infrastructures and evaluates data processing speeds and database query latencies, offering insights into their performance for healthcare applications.}, } @article {pmid39563957, year = {2024}, author = {Huang, W and Liu, X and Tian, L and Cui, G and Liu, Y}, title = {Vegetation and carbon sink response to water level changes in a seasonal lake wetland.}, journal = {Frontiers in plant science}, volume = {15}, number = {}, pages = {1445906}, pmid = {39563957}, issn = {1664-462X}, abstract = {Water level fluctuations are among the main factors affecting the development of wetland vegetation communities, carbon sinks, and ecological processes. Hongze Lake is a typical seasonal lake wetland in the Huaihe River Basin. Its water levels have experienced substantial fluctuations because of climate change, as well as gate and dam regulations. In this study, long-term cloud-free remote sensing images of water body area, net primary productivity (NPP), gross primary productivity (GPP), and fractional vegetation cover (FVC) of the wetlands of Hongze Lake were obtained from multiple satellites via Google Earth Engine (GEE) from 2006 to 2023. The trends in FVC were analyzed using a combined Theil-Sen estimator and Mann-Kendall (MK) test. Linear regression was employed to analyze the correlation between the area of water bodies and that of different degrees of FVC.
Additionally, annual frequencies of various water levels were constructed to explore their association with GPP, NPP, and FVC. The results showed that water level fluctuations significantly influence the spatial and temporal patterns of wetland vegetation cover and carbon sinks, with a significant correlation (P<0.05) between water levels and vegetation distribution. Following extensive restoration efforts, the carbon sink capacity of the Hongze Lake wetland has increased. However, it is essential to consider the carbon sink capacity of areas with low vegetation cover, since the lakeshore zone, with its higher inundation frequency and low vegetation cover, had a lower carbon sink capacity. These findings provide a scientific basis for the establishment of carbon sink enhancement initiatives, restoration programs, and policies to improve the ecological value of wetland ecosystem conservation areas.}, } @article {pmid39561770, year = {2024}, author = {Kullo, IJ and Conomos, MP and Nelson, SC and Adebamowo, SN and Choudhury, A and Conti, D and Fullerton, SM and Gogarten, SM and Heavner, B and Hornsby, WE and Kenny, EE and Khan, A and Khera, AV and Li, Y and Martin, I and Mercader, JM and Ng, M and Raffield, LM and Reiner, A and Rowley, R and Schaid, D and Stilp, A and Wiley, K and Wilson, R and Witte, JS and Natarajan, P and , }, title = {The PRIMED Consortium: Reducing disparities in polygenic risk assessment.}, journal = {American journal of human genetics}, volume = {111}, number = {12}, pages = {2594-2606}, pmid = {39561770}, issn = {1537-6605}, support = {U01 HG011717/HG/NHGRI NIH HHS/United States ; U01 CA261339/CA/NCI NIH HHS/United States ; U01 HG011720/HG/NHGRI NIH HHS/United States ; U01 HG011723/HG/NHGRI NIH HHS/United States ; U01 HG011710/HG/NHGRI NIH HHS/United States ; U01 HG011715/HG/NHGRI NIH HHS/United States ; U01 HG011697/HG/NHGRI NIH HHS/United States ; U01 HG011719/HG/NHGRI NIH HHS/United States ; }, mesh = {Humans ; *Genetic Predisposition to Disease ; *Genome-Wide Association Study ; Genotype ; *Multifactorial Inheritance/genetics ; Neoplasms/genetics ; Phenotype ; Risk Assessment ; Risk Factors ; }, abstract = {By improving disease risk prediction, polygenic risk scores (PRSs) could have a significant impact on health promotion and disease prevention. Due to the historical oversampling of populations with European ancestry for genome-wide association studies, PRSs perform less well in other, understudied populations, leading to concerns that clinical use in their current forms could widen health care disparities. The PRIMED Consortium was established to develop methods to improve the performance of PRSs in global populations and individuals of diverse genetic ancestry. To this end, PRIMED is aggregating and harmonizing multiple phenotype and genotype datasets on AnVIL, an interoperable secure cloud-based platform, to perform individual- and summary-level analyses using population and statistical genetics approaches. Study sites, the coordinating center, and representatives from the NIH work alongside other NHGRI and global consortia to achieve these goals. PRIMED is also evaluating ethical and social implications of PRS implementation and investigating the joint modeling of social determinants of health and PRS in computing disease risk. The phenotypes of interest are primarily cardiometabolic diseases and cancer, the leading causes of death and disability worldwide.
Early deliverables of the consortium include methods for data sharing on AnVIL, development of a common data model to harmonize phenotype and genotype data from cohort studies as well as electronic health records, adaptation of recent guidelines for population descriptors to global cohorts, and sharing of PRS methods/tools. As a multisite collaboration, PRIMED aims to foster equity in the development and use of polygenic risk assessment.}, } @article {pmid39557899, year = {2024}, author = {ElSayyad, SE and Saleh, AI and Ali, HA and Saraya, MS and Rabie, AH and Abdelsalam, MM}, title = {An effective robot selection and recharge scheduling approach for improving robotic networks performance.}, journal = {Scientific reports}, volume = {14}, number = {1}, pages = {28439}, pmid = {39557899}, issn = {2045-2322}, abstract = {With servers able to remotely control and manage them, mobile robots are becoming more widespread as a form of remote communication and human-robot interaction. Controlling these robots, however, can be challenging because of power consumption, delays, or the difficulty of selecting the right robot for a given task. This paper introduces a novel methodology for enhancing the efficacy of a mobile robotic network. The two key contributions of our suggested methodology are: (I) a strategy that eliminates unwieldy robots before selecting the ideal robot to satisfy the task, and (II) a procedure that uses a fuzzy algorithm to schedule the robots that need to be recharged. Since multiple robots may need to be recharged at once, this process aims to manage and control the recharging of robots in order to avoid conflicts or crowding. The suggested approach aims to preserve the charging capacity, physical resources (e.g., hardware components), and battery life of the robots by loading the application onto a remote server node instead of individual robots. Furthermore, our solution makes use of fog servers to speed up data transfers between smart devices and the cloud; fog computing is also used to move processing from remote cloud servers closer to the robots, improving on-site access to location-based services and real-time interaction. Simulation results showed that our method achieved a 2.4% improvement in average accuracy and a 2.2% improvement in average power usage over the most recent methods in comparable settings.}, } @article {pmid39556277, year = {2024}, author = {Kumar, A and Singh, D and Kumar, S and Chauhan, N and Singh, S}, title = {Sunflower mapping using machine learning algorithm in Google Earth Engine platform.}, journal = {Environmental monitoring and assessment}, volume = {196}, number = {12}, pages = {1208}, pmid = {39556277}, issn = {1573-2959}, mesh = {*Helianthus ; *Machine Learning ; India ; *Algorithms ; *Support Vector Machine ; Environmental Monitoring/methods ; Agriculture/methods ; Crops, Agricultural ; }, abstract = {The sunflower crop is one of the most prominent sources of vegetable oil globally. It is cultivated all around the world, including in Haryana, India. However, its mapping is limited by the requirement for huge computation power and large data storage capacity, small farm holdings, and an information gap on appropriate algorithms and spectral band combinations.
Thus, the current work identifies an appropriate machine learning (ML) algorithm, comparing random forest (RF) and support vector machine (SVM), which are reported as the best classifiers for land use and land cover, and the best of six band combinations (Sentinel optical, Sentinel SAR, and combined optical-SAR, each in single-date and time-series form) for sunflower crop mapping in the Ambala and Kurukshetra districts of Haryana using the Google Earth Engine (GEE) cloud platform. The GEE cloud-computing system combined with RF and SVM provided sunflower maps with accuracies ranging from 0.0% to 90% across the various band and classifier combinations, with the highest accuracy for RF with single-date optical data. The SVM classifier, tuned with parameters such as kernel type, degree, gamma, and cost, provided better overall accuracy for the classification of land use and land cover along with sunflower, ranging from 98.09% to 98.44% with Kappa coefficients from 0.96 to 0.97, for optical data and the combined SAR-optical time series. The platform is efficient and applicable to a larger part of the country for mapping sunflower and other crops with the identified combinations of satellite data and methodology, owing to the availability of satellite images, advanced ML algorithms, and analytical modules on a single platform.}, } @article {pmid39556254, year = {2025}, author = {Wang, W and He, J and Yang, S}, title = {Planning for a cooler metropolitan area: a perspective on the long-term interaction of urban expansion, surface urban heat islands and blue-green spaces' cooling impact.}, journal = {International journal of biometeorology}, volume = {69}, number = {2}, pages = {367-381}, pmid = {39556254}, issn = {1432-1254}, support = {51578482//National Natural Science Foundation of China/ ; }, mesh = {*Urbanization ; Cities ; *City Planning ; *Hot Temperature ; China ; Geographic Information Systems ; Remote Sensing Technology ; }, abstract = {Urbanization is widely acknowledged as a driving force behind the increase in land surface temperature (LST), while blue-green spaces (BGS) are recognized for their cooling effect. However, research on the long-term correlation between the two in highly urbanized areas remains limited. This study aims to fill this research gap by investigating the correlation and changes between urban expansion-induced LST rise and the cooling effect of BGS in the Hangzhou metropolitan area from 2000 to 2020. Our approach combines Geographic Information System (GIS), Remote Sensing (RS), and Google Earth Engine (GEE) cloud platforms, utilizing a random forest land use classification technique in conjunction with the geographically and temporally weighted regression (GTWR) model. The findings reveal a strong relationship between land expansion and the intensification of the surface urban heat island (SUHI) effect. The spatial heat island effect exhibits an exponential expansion in area, with an interannual LST rise of 0.4 °C. Notably, urban centers exert the highest regional heat contribution, while remote suburbs have the most significant impact on reducing LST. The impact of BGS on LST varies, fluctuating more in areas close to urban centers and less in water-rich areas.
This study contributes to a better understanding of the cooling potential of BGS in rapidly urbanizing metropolitan areas, offering valuable insights for sustainable urban planning.}, } @article {pmid39554511, year = {2024}, author = {Herbozo Contreras, LF and Truong, ND and Eshraghian, JK and Xu, Z and Huang, Z and Bersani-Veroni, TV and Aguilar, I and Leung, WH and Nikpour, A and Kavehei, O}, title = {Neuromorphic neuromodulation: Towards the next generation of closed-loop neurostimulation.}, journal = {PNAS nexus}, volume = {3}, number = {11}, pages = {pgae488}, pmid = {39554511}, issn = {2752-6542}, abstract = {Neuromodulation techniques have emerged as promising approaches for treating a wide range of neurological disorders, precisely delivering electrical stimulation to modulate abnormal neuronal activity. While leveraging the unique capabilities of AI holds immense potential for responsive neurostimulation, it remains an extremely challenging proposition, as real-time (low-latency) processing, low power consumption, and heat constraints are limiting factors. The use of sophisticated AI-driven models for personalized neurostimulation depends on the back-telemetry of data to external systems (e.g., cloud-based medical mesosystems and ecosystems). While this can be a solution, integrating continuous learning within implantable neuromodulation devices for several applications, such as seizure prediction in epilepsy, is an open question. We believe neuromorphic architectures hold outstanding potential to open new avenues for sophisticated on-chip analysis of neural signals and AI-driven personalized treatments. With a more than three-orders-of-magnitude reduction in the total data required for processing and feature extraction, the high power and memory efficiency of neuromorphic computing combined with hardware-firmware co-design can be considered a solution in the making for resource-constrained implantable neuromodulation systems. This perspective introduces the concept of Neuromorphic Neuromodulation, a new breed of closed-loop responsive feedback system. It highlights its potential to revolutionize implantable brain-machine microsystems for patient-specific treatment.}, } @article {pmid39551198, year = {2024}, author = {Mooselu, MG and Nikoo, MR and Liltved, H and Bjørkenes, MS and Elnashar, A and Shojaeezadeh, SA and Weber, TKD}, title = {Assessing road construction effects on turbidity in adjacent water bodies using Sentinel-1 and Sentinel-2.}, journal = {The Science of the total environment}, volume = {957}, number = {}, pages = {177554}, doi = {10.1016/j.scitotenv.2024.177554}, pmid = {39551198}, issn = {1879-1026}, abstract = {Road construction significantly affects water resources by introducing contaminants, fragmenting habitats, and degrading water quality. This study examines the use of Remote Sensing (RS) data from Sentinel-1 (S1) and Sentinel-2 (S2) in Google Earth Engine (GEE) to perform spatio-temporal analysis of turbidity in adjacent water bodies during the construction and operation of the E18 Arendal-Tvedestrand highway in southeastern Norway from 2017 to 2021. S1 radiometric data helped delineate water extents, while S2-Top of Atmosphere (TOA) multispectral data, corrected using the Modified Atmospheric correction for INland waters (MAIN), were used to estimate turbidity levels. To ensure a comprehensive time series of RS data, we utilized S2-TOA data corrected with the MAIN algorithm rather than S2-Bottom Of Atmosphere (BOA) data.
We validated the MAIN algorithm's accuracy against GLORIA (Global Observatory of Lake Responses to Interventions and Drivers) observations of surface water reflectance in lakes globally. Subsequently, the corrected S2 data were used to calculate turbidity with the Novoa and Nechad retrieval algorithms and compared against GLORIA turbidity observations. Findings indicate that the MAIN algorithm adequately estimates water-leaving surface reflectance (Pearson correlation > 0.7 for wavelengths between 490 and 705 nm) and turbidity (Pearson correlation > 0.6 for both algorithms), identifying Nechad as the more effective algorithm. We then used the MAIN-corrected S2 images to estimate turbidity in the study area and evaluated the results against local gauge data and observational reports. Results indicate that the proposed framework effectively captures trends and patterns of turbidity variation in the study area. Findings verify that road construction can increase turbidity in adjacent water bodies and emphasize that employing RS data on cloud platforms like GEE can provide insights for effective long-term water quality management strategies during construction and operation phases.}, } @article {pmid39548316, year = {2024}, author = {Guo, H and Huang, R and Xu, Z}, title = {The design of intelligent highway transportation system in smart city based on the internet of things.}, journal = {Scientific reports}, volume = {14}, number = {1}, pages = {28122}, doi = {10.1038/s41598-024-79903-0}, pmid = {39548316}, issn = {2045-2322}, abstract = {The design of an intelligent expressway transportation system based on the Internet of Things is studied to improve expressway safety, travel experience, and operation management. The characteristics of the Internet of Things and cloud computing technology and their application to expressways are analyzed, and the system design requirements of intelligent expressway transportation are established. In addition, the overall architecture of the system is studied and designed. The IaaS layer, PaaS layer, and SaaS layer of the cloud platform are designed and deployed. The intelligent information system makes the expressway highly information-driven. The simulation experiments reveal that the system needs only 120 milliseconds of accident processing time, far lower than an intelligent transportation system using only edge computing technology (201 milliseconds) or only cloud computing technology (443 milliseconds). Meanwhile, the accident response time is only 12 s, which is also superior to other models. In terms of cost-effectiveness, the monthly cost of the system is 7004 yuan, with a CPU utilization rate of 53%, demonstrating good cost-effectiveness and resource utilization efficiency. In addition, compared with the existing system, the average traffic congestion time has been reduced by 25%, the traffic accident rate has been reduced by 18%, and the accident rate has been reduced by 27%.
Through this research on intelligent expressway traffic system design, expressway safety, travel services, and operation management are effectively improved.}, } @article {pmid39547804, year = {2025}, author = {Parente, DJ}, title = {Leveraging the All of Us Database for Primary Care Research with Large Datasets.}, journal = {Journal of the American Board of Family Medicine : JABFM}, volume = {37}, number = {Supplement2}, pages = {S144-S155}, doi = {10.3122/jabfm.2023.230453R2}, pmid = {39547804}, issn = {1558-7118}, mesh = {*Primary Health Care/organization & administration/statistics & numerical data ; Humans ; United States ; *Databases, Factual ; Precision Medicine/methods ; National Institutes of Health (U.S.) ; *Biomedical Research ; }, abstract = {The National Institutes of Health (NIH) are supporting the All of Us research program, a large multicenter initiative to accelerate precision medicine. The All of Us database contains information on greater than 400,000 individuals spanning thousands of medical conditions, drug exposure types, and laboratory test types. These data can be correlated with genomic information and with survey data on social and environmental factors which influence health. A core principle of the All of Us program is that participants should reflect the diversity present in the United States population. The All of Us database has advanced many areas of medicine but is currently underutilized by primary care and public health researchers. In this Special Communication article, I seek to reduce the "barrier to entry" for primary care researchers to develop new projects within the All of Us Researcher Workbench. This Special Communication discusses (1) obtaining access to the database, (2) using the database securely and responsibly, (3) the key design concepts of the Researcher Workbench, and (4) details of data set extraction and analysis in the cloud computing environment. Fully documented, tutorial R statistical programming language and Python programs are provided alongside this article, which researchers may freely adapt under the open-source MIT license. The primary care research community should use the All of Us database to accelerate innovation in primary care research, make epidemiologic discoveries, promote community health, and further the infrastructure-building strategic priority of the family medicine 2024 to 2030 National Research Strategy.}, } @article {pmid39543174, year = {2024}, author = {Batchu, RK and Bikku, T and Thota, S and Seetha, H and Ayoade, AA}, title = {A novel optimization-driven deep learning framework for the detection of DDoS attacks.}, journal = {Scientific reports}, volume = {14}, number = {1}, pages = {28024}, doi = {10.1038/s41598-024-77554-9}, pmid = {39543174}, issn = {2045-2322}, abstract = {A distributed denial of service (DDoS) attack is one of the most hazardous assaults in cloud computing and networking. By depleting resources, this attack renders services unavailable to end users and leads to significant financial and reputational damage. Hence, identifying such threats is crucial to minimizing losses in revenue, market share, and productivity and to enhancing brand reputation. In this study, we implemented an effective intrusion detection system using a deep learning approach. The suggested framework includes three phases: data pre-processing, data balancing, and classification. First, we prepare the valid data, which is helpful for further processing.
Then, we balance the pre-processed data with a conditional generative adversarial network (CGAN), which minimizes bias towards the majority classes. Finally, we distinguish whether traffic is attack or benign using a stacked sparse denoising autoencoder (SSDAE) with a firefly-black widow (FA-BW) hybrid optimization algorithm. All experiments are validated on the CICDDoS2019 dataset and compared with well-established techniques. From these findings, we observed that the proposed strategy detects DDoS attacks significantly more accurately than other approaches. Based on our findings, this study highlights the crucial role played by advanced deep learning techniques and hybrid optimization algorithms in strengthening cybersecurity against DDoS attacks.}, } @article {pmid39541580, year = {2024}, author = {Nagarajan, R and Kondo, M and Salas, F and Sezgin, E and Yao, Y and Klotzman, V and Godambe, SA and Khan, N and Limon, A and Stephenson, G and Taraman, S and Walton, N and Ehwerhemuepha, L and Pandit, J and Pandita, D and Weiss, M and Golden, C and Gold, A and Henderson, J and Shippy, A and Celi, LA and Hogan, WR and Oermann, EK and Sanger, T and Martel, S}, title = {Economics and Equity of Large Language Models: Health Care Perspective.}, journal = {Journal of medical Internet research}, volume = {26}, number = {}, pages = {e64226}, pmid = {39541580}, issn = {1438-8871}, mesh = {Humans ; *Delivery of Health Care ; Language ; }, abstract = {Large language models (LLMs) continue to exhibit noteworthy capabilities across a spectrum of areas, including emerging proficiencies across the health care continuum. Successful LLM implementation and adoption depend on digital readiness, modern infrastructure, a trained workforce, privacy, and an ethical regulatory landscape. These factors can vary significantly across health care ecosystems, dictating the choice of a particular LLM implementation pathway. This perspective discusses 3 LLM implementation pathways, the training from scratch pathway (TSP), the fine-tuned pathway (FTP), and the out-of-the-box pathway (OBP), as potential onboarding points for health systems while facilitating equitable adoption. The choice of a particular pathway is governed by needs as well as affordability. Therefore, the risks, benefits, and economics of these pathways across 4 major cloud service providers (Amazon, Microsoft, Google, and Oracle) are presented. While cost comparisons, such as on-demand and spot pricing across the cloud service providers for the 3 pathways, are presented for completeness, the usefulness of managed services and cloud enterprise tools is elucidated. Managed services can complement the traditional workforce and expertise, while enterprise tools, such as federated learning, can overcome sample size challenges when implementing LLMs using health care data. Of the 3 pathways, TSP is expected to be the most resource-intensive regarding infrastructure and workforce while providing maximum customization, enhanced transparency, and performance. Because TSP trains the LLM using enterprise health care data, it is expected to harness the digital signatures of the population served by the health care system with the potential to impact outcomes. The use of pretrained models in FTP is a limitation: it may impact performance because the training data used in the pretrained model may have hidden bias and may not necessarily be health care-related. However, FTP provides a balance between customization, cost, and performance.
While OBP can be rapidly deployed, it provides minimal customization and transparency without guaranteeing long-term availability. OBP may also present challenges in interfacing seamlessly with downstream applications in health care settings with variations in pricing and use over time. Lack of customization in OBP can significantly limit its ability to impact outcomes. Finally, potential applications of LLMs in health care, including conversational artificial intelligence, chatbots, summarization, and machine translation, are highlighted. While the 3 implementation pathways discussed in this perspective have the potential to facilitate equitable adoption and democratization of LLMs, transitions between them may be necessary as the needs of health systems evolve. Understanding the economics and trade-offs of these onboarding pathways can guide their strategic adoption and demonstrate value while impacting health care outcomes favorably.}, } @article {pmid39540868, year = {2024}, author = {Hasavari, S and Esmaeilzadeh, P}, title = {Appropriately Matching Transport Care Units to Patients in Interhospital Transport Care: Implementation Study.}, journal = {JMIR formative research}, volume = {8}, number = {}, pages = {e65626}, pmid = {39540868}, issn = {2561-326X}, mesh = {Humans ; *Electronic Health Records ; *Patient Transfer ; Transportation of Patients/organization & administration ; Computer Security ; }, abstract = {BACKGROUND: In interfacility transport care, a critical challenge exists in accurately matching ambulance response levels to patients' needs, often hindered by limited access to essential patient data at the time of transport requests. Existing systems cannot integrate patient data from sending hospitals' electronic health records (EHRs) into the transfer request process, primarily due to privacy concerns, interoperability challenges, and the sensitive nature of EHR data. We introduce a distributed digital health platform, Interfacility Transport Care (ITC)-InfoChain, designed to solve this problem without compromising EHR security or data privacy.

OBJECTIVE: This study aimed to detail the implementation of ITC-InfoChain, a secure, blockchain-based platform designed to enhance real-time data sharing without compromising data privacy or EHR security.

METHODS: The ITC-InfoChain platform prototype was implemented on Amazon Web Services cloud infrastructure, using Hyperledger Fabric as a permissioned blockchain. Key elements included participant registration, identity management, and patient data collection isolated from the sending hospital's EHR system. The client program submits encrypted patient data to a distributed ledger, accessible to the receiving facility's critical care unit at the time of the transport request and to emergency medical services (EMS) teams during transport through the PatienTrack web app. Performance was evaluated through key performance indicators such as data transaction times and scalability across transaction loads.
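
A sketch of how a client might encrypt a patient payload before ledger submission, assuming the Python cryptography library; the record fields are illustrative, and the Hyperledger Fabric submission call is elided because it depends on the SDK and channel configuration:

    import json
    from cryptography.fernet import Fernet

    # In practice the key would be managed per authorized party,
    # not generated ad hoc alongside the payload.
    key = Fernet.generate_key()
    cipher = Fernet(key)

    record = {'patient_id': 'P-0001', 'acuity': 'critical',
              'vitals': {'hr': 118, 'spo2': 91}}
    payload = cipher.encrypt(json.dumps(record).encode())

    # The encrypted payload would then be passed to the chaincode, e.g.
    # contract.submit_transaction('CreateTransportRequest', payload)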

RESULTS: The ITC-InfoChain demonstrated strong performance and scalability. Data transaction times averaged 3.1 seconds for smaller volumes (1-20 transactions) and 6.4 seconds for 100 transactions. Optimized configurations improved processing times to 1.8-1.9 seconds for 400 transactions. These results confirm the platform's capacity to handle high transaction volumes, supporting timely, real-time data access for decision-making during transport requests and patient transfers.
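
If these averaged times are read as batch completion times, the implied throughput rises from roughly 100/6.4 ≈ 16 transactions per second to 400/1.9 ≈ 210 transactions per second under the optimized configuration, which states the scalability result in concrete terms.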

CONCLUSIONS: The ITC-InfoChain platform addresses the challenge of matching appropriate transport units to patient needs by ensuring data privacy, integrity, and real-time data sharing, enhancing the coordination of patient care. The platform's success suggests potential for regional pilots and broader adoption in secure health care systems. Stakeholder resistance due to blockchain unfamiliarity and data privacy concerns remains. Funding has been sought to support a pilot program to address these challenges through targeted education and engagement.}, } @article {pmid39525653, year = {2024}, author = {Gupta, R and Zuquim, G and Tuomisto, H}, title = {Seamless Landsat-7 and Landsat-8 data composites covering all Amazonia.}, journal = {Data in brief}, volume = {57}, number = {}, pages = {111034}, pmid = {39525653}, issn = {2352-3409}, abstract = {The use of satellite remote sensing has considerably improved scientific understanding of the heterogeneity of Amazonian rainforests. However, the persistent cloud cover and strong Bidirectional Reflectance Distribution Function (BRDF) effects make it difficult to produce up-to-date satellite image composites over the huge extent of Amazonia. Advanced pre-processing and pixel-based compositing over an extended time period are needed to fill the data gaps caused by clouds and to achieve consistency in pixel values across space. Recent studies have found that the multidimensional median (medoid) algorithm is robust to outliers and noise and thereby provides a useful approach for pixel-based compositing. Here we describe Landsat-7 and Landsat-8 composites covering all Amazonia that were produced using Landsat data from the years 2013-2021 and processed with Google Earth Engine (GEE). These products aggregate reflectance values over a relatively long time and are therefore especially useful for identifying permanent characteristics of the landscape, such as vegetation heterogeneity driven by differences in geologically defined edaphic conditions. To make similar compositing possible over other areas and time periods (including shorter time periods for change detection), we make the workflow available in GEE. Visual inspection and comparison with other Landsat products confirmed that the pre-processing workflow was efficient and the composites are seamless and without data gaps, although some artifacts present in the source data remain.
Basin-wide Landsat-7 and Landsat-8 composites are expected to facilitate both local and broad-scale ecological and biogeographical studies, species distribution modeling, and conservation planning in Amazonia.}, } @article {pmid39533201, year = {2024}, author = {Ko, G and Kim, PG and Yoon, BH and Kim, J and Song, W and Byeon, I and Yoon, J and Lee, B and Kim, YK}, title = {Closha 2.0: a bio-workflow design system for massive genome data analysis on high performance cluster infrastructure.}, journal = {BMC bioinformatics}, volume = {25}, number = {1}, pages = {353}, pmid = {39533201}, issn = {1471-2105}, support = {2020M3A9I6A01036057//Korean Ministry of Science and Technology/ ; 2020M3A9I6A01036057//Korean Ministry of Science and Technology/ ; 2020M3A9I6A01036057//Korean Ministry of Science and Technology/ ; 2020M3A9I6A01036057//Korean Ministry of Science and Technology/ ; 2020M3A9I6A01036057//Korean Ministry of Science and Technology/ ; 2020M3A9I6A01036057//Korean Ministry of Science and Technology/ ; 2020M3A9I6A01036057//Korean Ministry of Science and Technology/ ; 2020M3A9I6A01036057//Korean Ministry of Science and Technology/ ; RS-2022-00155857//Institute of Information & Communications Technology Planning & Evaluation (IITP)/ ; }, mesh = {*Genomics/methods ; *Software ; *Cloud Computing ; *High-Throughput Nucleotide Sequencing/methods ; Workflow ; }, abstract = {BACKGROUND: The explosive growth of next-generation sequencing data has resulted in ultra-large-scale datasets and significant computational challenges. As the cost of next-generation sequencing (NGS) has decreased, the amount of genomic data has surged globally. However, the cost and complexity of the computational resources required continue to be substantial barriers to leveraging big data. A promising solution to these computational challenges is cloud computing, which provides researchers with the necessary CPUs, memory, storage, and software tools.

RESULTS: Here, we present Closha 2.0, a cloud computing service that offers a user-friendly platform for analyzing massive genomic datasets. Closha 2.0 is designed to provide a cloud-based environment that enables all genomic researchers, including those with limited or no programming experience, to easily analyze their genomic data. The new 2.0 version of Closha has more user-friendly features than the previous 1.0 version. First, the workbench features a script editor that supports Python, R, and shell script programming, enabling users to write scripts and integrate them into their pipelines. This functionality is particularly useful for downstream analysis. Second, Closha 2.0 runs on containers, which execute each tool in an independent environment. This provides a stable environment and prevents dependency issues and version conflicts among tools. Additionally, users can execute each step of a pipeline individually, allowing them to test applications at each stage and adjust parameters to achieve the desired results. We also updated a high-speed data transmission tool called GBox that facilitates the rapid transfer of large datasets.
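
To make the container isolation idea concrete, here is a minimal sketch (not Closha's actual implementation) of running each pipeline step in its own Docker container so tool dependencies and versions cannot conflict; the images and commands are placeholders.

```python
import subprocess

# Placeholder images and commands: each pipeline step runs in its own
# container, so tool dependencies and versions cannot conflict.
PIPELINE = [
    ("example.org/tools/fastqc:latest", ["fastqc", "/data/sample.fastq.gz"]),
    ("example.org/tools/bwa:latest", ["bwa", "index", "/data/ref.fa"]),
]


def run_step(image: str, command: list[str]) -> None:
    """Run one tool inside a disposable container with a shared work dir."""
    subprocess.run(
        ["docker", "run", "--rm", "-v", "/tmp/work:/data", image, *command],
        check=True,
    )


# Steps can also be launched one at a time to test and tune parameters,
# mirroring the per-step execution the abstract describes.
for image, command in PIPELINE:
    run_step(image, command)
```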

CONCLUSIONS: The analysis pipelines on Closha 2.0 are reproducible, with all analysis parameters and inputs being permanently recorded. Closha 2.0 simplifies multi-step analysis with drag-and-drop functionality and provides a user-friendly interface for genomic scientists to obtain accurate results from NGS data. Closha 2.0 is freely available at https://www.kobic.re.kr/closha2 .}, } @article {pmid39527879, year = {2024}, author = {Aslam, RW and Naz, I and Shu, H and Yan, J and Quddoos, A and Tariq, A and Davis, JB and Al-Saif, AM and Soufan, W}, title = {Multi-temporal image analysis of wetland dynamics using machine learning algorithms.}, journal = {Journal of environmental management}, volume = {371}, number = {}, pages = {123123}, doi = {10.1016/j.jenvman.2024.123123}, pmid = {39527879}, issn = {1095-8630}, mesh = {*Wetlands ; *Machine Learning ; Algorithms ; Climate Change ; Environmental Monitoring/methods ; Conservation of Natural Resources/methods ; Urbanization ; Ecosystem ; }, abstract = {Wetlands play a crucial role in enhancing groundwater quality, mitigating natural hazards, controlling erosion, and providing essential habitats for unique flora and wildlife. Despite their significance, wetlands are facing decline in various global locations, underscoring the need for effective mapping, monitoring, and predictive modeling approaches. Recent advances in machine learning, time series earth observation data, and cloud computing have opened up new possibilities to address the challenges of large-scale wetlands mapping and dynamics forecasting. This research conducts a comprehensive analysis of wetland dynamics in the Thatta region, encompassing Haleji & Kinjhar Lake in Pakistan, and evaluates the efficacy of different classification systems. Leveraging Google Earth Engine, Landsat imagery, and various spectral indices, we assess four classification techniques to derive accurate wetland mapping results. Our findings demonstrate that Random Forest emerged as the most efficient and accurate method, achieving 87% accuracy across all time periods. Change detection analysis reveals a significant and alarming decline in Haleji & Kinjhar Lake wetlands over 1990-2020, primarily driven by agricultural expansion, urbanization, groundwater extraction, and climate change impacts like rising temperatures and reduced precipitation. If left unaddressed, this continued wetland loss could have severe implications for aquatic and terrestrial species, water and soil quality, wildlife populations, and local livelihoods. The study predicts future wetland dynamics under different scenarios - enhancing drainage for farmland conversion (10-20% increase), increasing urbanization (10-20% expansion), escalating groundwater extraction (7.2m annual decline), and climate change (up to 5 °C warming and 54% precipitation deficit by 2050). These scenarios forecast sustained long-term wetland deterioration driven by anthropogenic pressures and climate change. To guide conservation strategies, the research integrates satellite data analytics, machine learning algorithms, and spatial modeling to generate actionable insights into multifaceted wetland vulnerabilities. Findings provide a robust baseline to inform policies ensuring sustainable management and preservation of these vital ecosystems amidst escalating human and climate threats. 
Over 1990-2020, the Thatta region witnessed a 352.8 sq.km loss of wetlands, necessitating urgent restoration efforts to safeguard their invaluable ecosystem services.}, } @article {pmid39527419, year = {2024}, author = {Wei, J and Wang, L and Zhou, Z and Zhuo, L and Zeng, X and Fu, X and Zou, Q and Li, K and Zhou, Z}, title = {BloodPatrol: Revolutionizing Blood Cancer Diagnosis - Advanced Real-Time Detection Leveraging Deep Learning & Cloud Technologies.}, journal = {IEEE journal of biomedical and health informatics}, volume = {PP}, number = {}, pages = {}, doi = {10.1109/JBHI.2024.3496294}, pmid = {39527419}, issn = {2168-2208}, abstract = {Cloud computing and Internet of Things (IoT) technologies are gradually becoming the technological changemakers in cancer diagnosis. Blood cancer is an aggressive disease affecting the blood, bone marrow, and lymphatic system, and its early detection is crucial for subsequent treatment. Flow cytometry has been widely studied as a commonly used method for detecting blood cancer. However, the high computation and resource consumption severely limit its practical application, especially in regions with limited medical and computational resources. In this study, with the help of cloud computing and IoT technologies, we develop a novel blood cancer dynamic monitoring and diagnostic model named BloodPatrol based on an intelligent feature weight fusion mechanism. The proposed model is capable of capturing the dual-view importance relationship between cell samples and features, greatly improving prediction accuracy and significantly surpassing previous models. Moreover, benefiting from the powerful processing ability of cloud computing, BloodPatrol can run on a distributed network to efficiently process large-scale cell data, which provides immediate and scalable blood cancer diagnostic services. We have also created a cloud diagnostic platform to facilitate access to our work; the latest access link and updates are available at: https://github.com/kkkayle/BloodPatrol.}, } @article {pmid39523221, year = {2024}, author = {Huggins, DR and Phillips, CL and Carlson, BR and Casanova, JJ and Heineck, GC and Bean, AR and Brooks, ES}, title = {The LTAR Cropland Common Experiment at R. J. Cook Agronomy Farm.}, journal = {Journal of environmental quality}, volume = {53}, number = {6}, pages = {839-850}, doi = {10.1002/jeq2.20647}, pmid = {39523221}, issn = {1537-2537}, mesh = {*Agriculture/methods ; *Crops, Agricultural ; Farms ; Washington ; Environmental Monitoring/methods ; Soil/chemistry ; Ecosystem ; }, abstract = {Dryland agriculture in the Inland Pacific Northwest is challenged in part by rising input costs for seed, fertilizer, and agrichemicals; threats to water quality and soil health, including soil erosion, organic matter decline, acidification, compaction, and nutrient imbalances; lack of cropping system diversity; herbicide resistance; and air quality concerns from atmospheric emissions of particulate matter and greenhouse gases. Technological advances such as rapid data acquisition, artificial intelligence, cloud computing, and robotics have helped fuel innovation and discovery but have also further complicated agricultural decision-making and research. 
Meeting these challenges has promoted interest in (1) supporting long-term research that enables assessment of ecosystem service trade-offs and advances sustainable and regenerative approaches to agriculture, and (2) developing coproduction research approaches that actively engage decision-makers and accelerate innovation. The R. J. Cook Agronomy Farm (CAF) Long-Term Agroecosystem Research (LTAR) site established a cropping systems experiment in 2017 that contrasts prevailing (PRV) and alternative (ALT) practices at field scales over a proposed 30-year time frame. The experimental site is on the Washington State University CAF near Pullman, WA. Cropping practices include a wheat-based cropping system with wheat (Triticum aestivum L.), canola (Brassica napus, variety napus), chickpea (Cicer arietinum), and winter pea (Pisum sativum), with winter wheat produced every third year under the ALT practices of continuous no-tillage and precision applied N, compared to the PRV practice of reduced tillage (RT) and uniformly applied agrichemicals. Biophysical measurements are made at georeferenced locations that capture field-scale spatial variability at temporal intervals that follow approved methods for each agronomic and environmental metric. Research to date is assessing spatial and temporal variations in cropping system performance (e.g., crop yield, soil health, and water and air quality) for ALT versus PRV and associated tradeoffs. Future research will explore a coproduction approach with the intent of advancing discovery, innovation, and impact through collaborative stakeholder-researcher partnerships that direct and implement research priorities.}, } @article {pmid39520861, year = {2024}, author = {Ranjan, AK and Gorai, AK}, title = {Assessment of global carbon dynamics due to mining-induced forest cover loss during 2000-2019 using satellite datasets.}, journal = {Journal of environmental management}, volume = {371}, number = {}, pages = {123271}, doi = {10.1016/j.jenvman.2024.123271}, pmid = {39520861}, issn = {1095-8630}, mesh = {*Mining ; *Forests ; *Climate Change ; Carbon Dioxide/analysis ; Carbon Sequestration ; Conservation of Natural Resources ; Carbon/analysis ; Environmental Monitoring ; }, abstract = {Mining activities significantly contribute to forest cover loss (FCL), subsequently altering global carbon dynamics and exacerbating climate change. The present study aims to estimate the contributions of mining-induced FCL to carbon sequestration loss (CSL) and carbon dioxide (CO2) emissions from 2000 to 2019 using the proxy datasets. For FCL analysis, the global FCL data at 30 m spatial resolution, developed by Hansen et al. (2013), was employed in the Google Earth Engine (GEE) cloud platform. Furthermore, for CSL and CO2 emissions assessment, Moderate Resolution Imaging Spectroradiometer (MODIS)-based Net Primary Productivity (NPP) data and Zhang and Liang (2020)-developed biomass datasets were used, respectively. The outcomes of the study exhibited approximately 16,785.90 km[2] FCL globally due to mining activities, resulting in an estimated CSL of ∼36,363.17 Gg CO2/year and CO2 emissions of ∼490,525.30 Gg CO2. Indonesia emerged as the largest contributor to mining-induced FCL, accounting for 3,622.78 km[2] of deforestation, or 21.58% of the global total. Brazil and Canada followed, with significant deforestation and CO2 emissions. 
The findings revealed that mining activities are a major driver of deforestation, particularly in resource-rich regions, leading to substantial environmental degradation. The relative FCL was notably high in smaller countries like Suriname and Guyana, where mining activities constituted a significant proportion of total deforestation. The present study underscores the urgent need for robust regulatory frameworks, sustainable land management practices, and coordinated international efforts to mitigate the adverse environmental impacts of mining. The findings of this study can inform policymakers and stakeholders, leading to more effective conservation strategies and benefiting society by promoting environmental sustainability and resilience against climate change.}, } @article {pmid39518794, year = {2024}, author = {Tangorra, FM and Buoio, E and Calcante, A and Bassi, A and Costa, A}, title = {Internet of Things (IoT): Sensors Application in Dairy Cattle Farming.}, journal = {Animals : an open access journal from MDPI}, volume = {14}, number = {21}, pages = {}, pmid = {39518794}, issn = {2076-2615}, abstract = {The expansion of dairy cattle farms and the increase in herd size have made the control and management of animals more complex, with potentially negative effects on animal welfare, health, productive/reproductive performance and consequently farm income. Precision Livestock Farming (PLF) is based on the use of sensors to monitor individual animals in real time, enabling farmers to manage their herds more efficiently and optimise their performance. The integration of sensors and devices used in PLF with the Internet of Things (IoT) technologies (edge computing, cloud computing, and machine learning) creates a network of connected objects that improve the management of individual animals through data-driven decision-making processes. This paper illustrates the main PLF technologies used in the dairy cattle sector, highlighting how the integration of sensors and devices with IoT addresses the challenges of modern dairy cattle farming, leading to improved farm management.}, } @article {pmid39517991, year = {2024}, author = {Yang, D and Wu, J and He, Y}, title = {Optimizing the Agricultural Internet of Things (IoT) with Edge Computing and Low-Altitude Platform Stations.}, journal = {Sensors (Basel, Switzerland)}, volume = {24}, number = {21}, pages = {}, pmid = {39517991}, issn = {1424-8220}, support = {Grant 62401230//National Natural Science Foundation of China/ ; Grant LQ24F010003//Zhejiang Provincial Natural Science Foundation of China/ ; }, abstract = {Using low-altitude platform stations (LAPSs) in the agricultural Internet of Things (IoT) enables the efficient and precise monitoring of vast and hard-to-reach areas, thereby enhancing crop management. By integrating edge computing servers into LAPSs, data can be processed directly at the edge in real time, significantly reducing latency and dependency on remote cloud servers. Motivated by these advancements, this paper explores the application of LAPSs and edge computing in the agricultural IoT. First, we introduce an LAPS-aided edge computing architecture for the agricultural IoT, in which each task is segmented into several interdependent subtasks for processing. Next, we formulate a total task processing delay minimization problem, taking into account constraints related to task dependency and priority, as well as equipment energy consumption. 
Then, by treating the task dependencies as directed acyclic graphs, a heuristic task processing algorithm with priority selection is developed to solve the formulated problem. Finally, the numerical results show that the proposed edge computing scheme outperforms state-of-the-art works and the local computing scheme in terms of the total task processing delay.}, } @article {pmid39517887, year = {2024}, author = {Orro, A and Geminiani, GA and Sicurello, F and Modica, M and Pegreffi, F and Neri, L and Augello, A and Botteghi, M}, title = {A Cloud Infrastructure for Health Monitoring in Emergency Response Scenarios.}, journal = {Sensors (Basel, Switzerland)}, volume = {24}, number = {21}, pages = {}, pmid = {39517887}, issn = {1424-8220}, mesh = {*Cloud Computing ; Humans ; Monitoring, Physiologic/methods/instrumentation ; *Wearable Electronic Devices ; Vital Signs/physiology ; Electrocardiography/methods ; Internet of Things ; Emergency Responders ; Emergency Medical Services/methods ; }, abstract = {Wearable devices have a significant impact on society, and recent advancements in modern sensor technologies are opening up new possibilities for healthcare applications. Continuous vital sign monitoring using Internet of Things solutions can be a crucial tool for emergency management, reducing risks in rescue operations and ensuring the safety of workers. The massive amounts of data, high network traffic, and computational demands of a typical monitoring application can be challenging to manage with traditional infrastructure. Cloud computing provides a solution with its built-in resilience and elasticity capabilities. This study presents a Cloud-based monitoring architecture for remote vital sign tracking of paramedics and medical workers through the use of a mobile wearable device. The system monitors vital signs such as electrocardiograms and breathing patterns during work sessions, and it is able to manage real-time alarm events to a personnel management center. In this study, 900 paramedics and emergency workers were monitored using wearable devices over a period of 12 months. Data from these devices were collected, processed via Cloud infrastructure, and analyzed to assess the system's reliability and scalability. The results showed a significant improvement in worker safety and operational efficiency. This study demonstrates the potential of Cloud-based systems and Internet of Things devices in enhancing emergency response efforts.}, } @article {pmid39517879, year = {2024}, author = {Zhang, Y and Xia, G and Yu, C and Li, H and Li, H}, title = {Fault-Tolerant Scheduling Mechanism for Dynamic Edge Computing Scenarios Based on Graph Reinforcement Learning.}, journal = {Sensors (Basel, Switzerland)}, volume = {24}, number = {21}, pages = {}, pmid = {39517879}, issn = {1424-8220}, abstract = {With the proliferation of Internet of Things (IoT) devices and edge nodes, edge computing has taken on much of the real-time data processing and low-latency response tasks which were previously managed by cloud computing. However, edge computing often encounters challenges such as network instability and dynamic resource variations, which can lead to task interruptions or failures. To address these issues, developing a fault-tolerant scheduling mechanism is crucial to ensure that a system continues to operate efficiently even when some nodes experience failures. In this paper, we propose an innovative fault-tolerant scheduling model based on asynchronous graph reinforcement learning. 
This model incorporates a deep reinforcement learning framework built upon a graph neural network, allowing it to accurately capture the complex communication relationships between computing nodes. The model generates fault-tolerant scheduling actions as output, ensuring robust performance in dynamic environments. Additionally, we introduce an asynchronous model update strategy, which enhances the model's capability of real-time dynamic scheduling through multi-threaded parallel interactions with the environment and frequent model updates via running threads. The experimental results demonstrate that the proposed method outperformed the baseline algorithms in terms of quality of service (QoS) assurance and fault-tolerant scheduling capabilities.}, } @article {pmid39517830, year = {2024}, author = {Oliveira, D and Mafra, S}, title = {Implementation of an Intelligent Trap for Effective Monitoring and Control of the Aedes aegypti Mosquito.}, journal = {Sensors (Basel, Switzerland)}, volume = {24}, number = {21}, pages = {}, pmid = {39517830}, issn = {1424-8220}, support = {403827/2021-3//National Council for Scientific and Technological Development-CNPq/ ; 2021/06946-0//FAPESP/ ; APQ-03283-17//Fundação de Amparo à Pesquisa do Estado de Minas Gerais (FAPEMIG)/ ; APQ-03162-24//Fundação de Amparo à Pesquisa do Estado de Minas Gerais (FAPEMIG)/ ; 052/2023//MCTI/ ; }, mesh = {*Aedes/physiology ; Animals ; *Mosquito Control/methods/instrumentation ; *Algorithms ; Machine Learning ; Artificial Intelligence ; Mosquito Vectors ; Humans ; Dengue/prevention & control/transmission ; }, abstract = {Aedes aegypti is a mosquito species known for its role in transmitting dengue fever, a viral disease prevalent in tropical and subtropical regions. Recognizable by its white markings and preference for urban habitats, this mosquito breeds in standing water near human dwellings. A promising approach to combat the proliferation of mosquitoes is the use of smart traps, equipped with advanced technologies to attract, capture, and monitor them. The most significant results include 97% accuracy in detecting Aedes aegypti, 100% accuracy in identifying bees, and 90.1% accuracy in classifying butterflies in the laboratory. Field trials successfully validated and identified areas for continued improvement. The integration of technologies such as Internet of Things (IoT), cloud computing, big data, and artificial intelligence has the potential to revolutionize pest control, significantly improving mosquito monitoring and control. The application of machine learning (ML) algorithms and computer vision for the identification and classification of Aedes aegypti is a crucial part of this process. This article proposes the development of a smart trap for selective control of winged insects, combining IoT devices, high-resolution cameras, and advanced ML algorithms for insect detection and classification. The intelligent system features the YOLOv7 algorithm (You Only Look Once v7) that is capable of detecting and counting insects in real time, combined with LoRa/LoRaWan connectivity and IoT system intelligence. 
This adaptive approach is effective in combating Aedes aegypti mosquitoes in real time.}, } @article {pmid39517761, year = {2024}, author = {Zheng, H and Hou, H and Tian, D and Tong, C and Qin, Z}, title = {Evaluating the Patterns of Maize Development in the Hetao Irrigation Region Using the Sentinel-1 GRD SAR Bipolar Descriptor.}, journal = {Sensors (Basel, Switzerland)}, volume = {24}, number = {21}, pages = {}, pmid = {39517761}, issn = {1424-8220}, support = {2023JBGS0014//Research and demonstration of key technologies of efficient water saving in the forage belt in the Yellow River Basin of Inner Mongolia/ ; }, abstract = {Assessing maize yield is critical, as it is directly influenced by the crop's growth conditions. Therefore, real-time monitoring of maize growth is necessary. Regular monitoring of maize growth indicators is essential for optimizing irrigation management and evaluating agricultural yield. However, quantifying the physical aspects of regional crop development using time-series data is a challenging task. This research was conducted at the Dengkou Experimental Station in the Hetao irrigation area, Northwest China, to develop a monitoring tool for regional maize growth parameters. The tool aimed to establish a correlation between satellite-based physical data and actual crop growth on the ground. This study utilized dual-polarization Sentinel-1A GRD SAR data, accessible via the Google Earth Engine (GEE) cloud platform. Three polarization descriptors were introduced: θc (pseudo-scattering type parameter), Hc (pseudo-scattering entropy parameter), and mc (co-polar purity parameter). Using an unsupervised clustering framework, the maize-growing area was classified into several scattering mechanism groups, and the growth characteristics of the maize crop were analyzed. The results showed that throughout the maize development cycle, the parameters θc, Hc, and mc varied within the ranges of 26.82° to 42.13°, 0.48 to 0.89, and 0.32 to 0.85, respectively. During the leaf development stage, approximately 80% of the maize sampling points were concentrated in the low-to-moderate entropy scattering zone. As the plants reached the big trumpet stage, the entire cluster shifted to the high-entropy vegetation scattering zone. Finally, at maturity, over 60% of the sampling points were located in the high-entropy distribution scattering zone. This study presents an advanced analytical tool for crop management and yield estimation by utilizing precise and high-resolution spatial and temporal data on crop growth dynamics. The tool enhances the accuracy of crop growth management across different spatial and temporal conditions.}, } @article {pmid39510367, year = {2024}, author = {Adams, MCB and Griffin, C and Adams, H and Bryant, S and Hurley, RW and Topaloglu, U}, title = {Adapting the open-source Gen3 platform and kubernetes for the NIH HEAL IMPOWR and MIRHIQL clinical trial data commons: Customization, cloud transition, and optimization.}, journal = {Journal of biomedical informatics}, volume = {159}, number = {}, pages = {104749}, pmid = {39510367}, issn = {1532-0480}, support = {K08 EB022631/EB/NIBIB NIH HHS/United States ; R24 DA055306/DA/NIDA NIH HHS/United States ; U24 DA058606/DA/NIDA NIH HHS/United States ; }, mesh = {Humans ; *Clinical Trials as Topic ; *Software ; United States ; *Cloud Computing ; National Institutes of Health (U.S.) 
; Chronic Pain/drug therapy ; Analgesics, Opioid/therapeutic use ; }, abstract = {OBJECTIVE: This study aims to provide the decision-making framework, strategies, and software used to successfully deploy the first combined chronic pain and opioid use data clinical trial data commons using the Gen3 platform.

MATERIALS AND METHODS: The approach involved adapting the open-source Gen3 platform and Kubernetes for the needs of the NIH HEAL IMPOWR and MIRHIQL networks. Key steps included customizing the Gen3 architecture, transitioning from Amazon to Google Cloud, adapting data ingestion and harmonization processes, ensuring security and compliance for the Kubernetes environment, and optimizing performance and user experience.
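
A small operational sketch of the Kubernetes side, assuming the official Python client and a hypothetical "gen3" namespace (the authors' actual deployment layout is not given), might check service pod health after the cloud transition:

```python
from kubernetes import client, config

# Load credentials from the local kubeconfig (e.g., a GKE cluster after
# the Amazon-to-Google Cloud transition described above).
config.load_kube_config()
v1 = client.CoreV1Api()

# The "gen3" namespace is an assumption; Gen3 services (fence, sheepdog,
# peregrine, the portal, ...) are commonly deployed into one namespace.
for pod in v1.list_namespaced_pod(namespace="gen3").items:
    statuses = pod.status.container_statuses or []
    ready = all(c.ready for c in statuses)
    print(f"{pod.metadata.name:50s} {pod.status.phase:10s} ready={ready}")
```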

RESULTS: The primary result was a fully operational IMPOWR data commons built on Gen3. Key features include a modular architecture supporting diverse clinical trial data types, automated processes for data management, fine-grained access control and auditing, and researcher-friendly interfaces for data exploration and analysis.
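
For the researcher-facing side, Gen3 commons expose a GraphQL submission endpoint; a minimal exploration query could look like the sketch below. The commons URL and token are placeholders, and the queried node and field names depend on the commons' data dictionary, so treat them as assumptions.

```python
import requests

COMMONS_URL = "https://example-commons.org"  # placeholder
ACCESS_TOKEN = "REPLACE_WITH_TOKEN"          # issued by the commons' auth service

# Gen3 exposes GraphQL at /api/v0/submission/graphql; the node and field
# names below depend on the data dictionary and are assumptions here.
query = {"query": "{ project(first: 10) { code name } }"}
response = requests.post(
    f"{COMMONS_URL}/api/v0/submission/graphql",
    json=query,
    headers={"Authorization": f"Bearer {ACCESS_TOKEN}"},
    timeout=30,
)
response.raise_for_status()
for project in response.json()["data"]["project"]:
    print(project["code"], "-", project["name"])
```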

DISCUSSION: The successful development of the Wake Forest IDEA-CC data commons represents a significant milestone for chronic pain and addiction research. Harmonized, FAIR data from diverse studies can be discovered in a secure, scalable repository. Challenges remain in long-term maintenance and governance, but the commons provides a foundation for accelerating scientific progress. Key lessons learned include the importance of engaging both technical and domain experts, the need for flexible yet robust infrastructure, and the value of building on established open-source platforms.

CONCLUSION: The WF IDEA-CC Gen3 data commons demonstrates the feasibility and value of developing a shared data infrastructure for chronic pain and opioid use research. The lessons learned can inform similar efforts in other clinical domains.}, } @article {pmid39507985, year = {2024}, author = {Pan, J and Zhang, T and Zhang, Y and Lin, X and Li, W and Song, C and Lai, H and Yan, X and Wang, X and Qu, X and Deng, Z and Chen, X and Quan, L and Zhao, Q and Dong, Y and Zhang, W and Wu, K and Tang, X}, title = {[Digital Intelligence Drives the High-Quality Development of the Healthcare Service System: Development Mechanisms and Implementation Pathway].}, journal = {Sichuan da xue xue bao. Yi xue ban = Journal of Sichuan University. Medical science edition}, volume = {55}, number = {5}, pages = {1055-1062}, pmid = {39507985}, issn = {1672-173X}, mesh = {*Delivery of Health Care ; China ; *Artificial Intelligence ; Digital Technology ; Internet of Things ; Cloud Computing ; Big Data ; Blockchain ; }, abstract = {The rapid development of digital intelligence technologies is providing a powerful boost to the high-quality development of the healthcare system. Considering the current state of our healthcare services and guided by General Secretary Xi Jinping's insights on new quality productive forces and the directives from Third Plenary Session of Communist Party of China's 20th Central Committee, the high-quality development of the healthcare service system should focus on digital intelligence technologies such as cloud computing, big data, privacy computing, blockchain, Internet of Things (IoT), mobile computing, and AI. The key measures should include the optimization of production factors, services, and governance. Emphasis should be placed on enhancing the efficient and intensive development of the development model, ensuring the high-quality and continuous integration of the supply model, and transitioning to scientific and modern management methods. Herein, we analyzed the "factor optimization-service optimization-governance optimization" development mechanism driven by digital intelligence and proposed corresponding implementation pathways, intending to provide references for establishing a high-quality and efficient healthcare service system with Chinese characteristics.}, } @article {pmid39508099, year = {2024}, author = {Fu, L and Gao, Y and Chen, Y and Wang, Y and Fang, X and Tian, S and Dong, H and Zhang, Y and Chen, Z and Wang, Z and Hu, S and Yi, X and Si, T}, title = {Critical Assessment of Protein Engineering (CAPE): A Student Challenge on the Cloud.}, journal = {ACS synthetic biology}, volume = {13}, number = {11}, pages = {3782-3787}, pmid = {39508099}, issn = {2161-5063}, mesh = {*Protein Engineering/methods ; *Cloud Computing ; *Students ; Machine Learning ; Algorithms ; Proteins/genetics/chemistry/metabolism ; Mutation ; }, abstract = {The success of AlphaFold in protein structure prediction highlights the power of data-driven approaches in scientific research. However, developing machine learning models to design and engineer proteins with desirable functions is hampered by limited access to high-quality data sets and experimental feedback. The Critical Assessment of Protein Engineering (CAPE) challenge addresses these issues through a student-focused competition, utilizing cloud computing and biofoundries to lower barriers to entry. 
CAPE serves as an open platform for community learning, where mutant data sets and design algorithms from past contestants help improve overall performance in subsequent rounds. Through two competition rounds, student participants collectively designed >1500 new mutant sequences, with the best-performing variants exhibiting catalytic activity up to 5-fold higher than the wild-type parent. We envision CAPE as a collaborative platform to engage young researchers and promote computational protein engineering.}, } @article {pmid39503000, year = {2024}, author = {Efstathiou, CI and Adams, E and Coats, CJ and Zelt, R and Reed, M and McGee, J and Foley, KM and Sidi, FI and Wong, DC and Fine, S and Arunachalam, S}, title = {Enabling high-performance cloud computing for the Community Multiscale Air Quality Model (CMAQ) version 5.3.3: performance evaluation and benefits for the user community.}, journal = {Geoscientific model development}, volume = {17}, number = {18}, pages = {7001-7027}, pmid = {39503000}, issn = {1991-9603}, support = {EP-C-16-014/EPA/EPA/United States ; EPA999999/ImEPA/Intramural EPA/United States ; }, abstract = {The Community Multiscale Air Quality Model (CMAQ) is a local- to hemispheric-scale numerical air quality modeling system developed by the U.S. Environmental Protection Agency (USEPA) and supported by the Community Modeling and Analysis System (CMAS) center. CMAQ is used for regulatory purposes by the USEPA program offices and state and local air agencies and is also widely used by the broader global research community to simulate and understand complex air quality processes and for computational environmental fate and transport and climate and health impact studies. Leveraging state-of-the-science cloud computing resources for high-performance computing (HPC) applications, CMAQ is now available as a fully tested, publicly available technology stack (HPC cluster and software stack) for two major cloud service providers (CSPs). Specifically, CMAQ configurations and supporting materials have been developed for use on their HPC clusters, including extensive online documentation, tutorials and guidelines to scale and optimize air quality simulations using their services. These resources allow modelers to rapidly bring together CMAQ, cloud-hosted datasets, and visualization and evaluation tools on ephemeral clusters that can be deployed quickly and reliably worldwide. Described here are considerations in CMAQ version 5.3.3 cloud use and the supported resources for each CSP, presented through a benchmark application suite that was developed as an example of a typical simulation for testing and verifying components of the modeling system. The outcomes of this effort are to provide findings from performing CMAQ simulations on the cloud using popular vendor-provided resources, to enable the user community to adapt this for their own needs, and to identify specific areas of potential optimization with respect to storage and compute architectures.}, } @article {pmid39494417, year = {2024}, author = {Shankar, GS and Onyema, EM and Kavin, BP and Gude, V and Prasad, BS}, title = {Breast Cancer Diagnosis Using Virtualization and Extreme Learning Algorithm Based on Deep Feed Forward Networks.}, journal = {Biomedical engineering and computational biology}, volume = {15}, number = {}, pages = {11795972241278907}, pmid = {39494417}, issn = {1179-5972}, abstract = {One of the leading causes of death for women worldwide is breast cancer. 
Early detection and prompt treatment can reduce the risk of breast cancer-related death. Cloud computing and machine learning are crucial for disease diagnosis today, but they are especially important for those who live in distant places with poor access to healthcare. While machine learning-based diagnosis tools act as primary readers and aid radiologists in correctly diagnosing diseases, cloud-based technology can also assist remote diagnostics and telemedicine services. The promise of techniques based on Artificial Neural Networks (ANN) for sickness diagnosis has attracted the attention of several researchers. The proposed research comprises preprocessing, feature extraction, and classification stages. A Smart Window Vestige Deletion (SWVD) technique is initially suggested for preprocessing. It consists of Savitzky-Golay (S-G) smoothing, updated 2-stage filtering, and adaptive time window division. This technique separates each channel into multiple time periods by adaptively pre-analyzing its specificity. On each window, an altered 2-stage filtering process is then used to retrieve some tumor information. After applying S-G smoothing and integrating the broken time sequences, the process is complete. To deliver effective feature extraction, the Deep Residual based Multiclass architecture (DRMFA) is used to identify characteristics at the cellular and tissue levels in both small and large patches of histological images. Finally, a customized strategy (ACF-ELM) is developed that combines deep learning with an improved crow-forage Extreme Learning Machine (ELM). When it comes to diagnosing ailments, the cloud-based ELM performs similarly to certain cutting-edge technologies. The cloud-based ELM approach beats alternative solutions, according to the DDSM and INbreast dataset results. Significant experimental results show that the accuracy for data inputs is 0.9845, the precision is 0.96, the recall is 0.94, and the F1 score is 0.95.}, } @article {pmid39493462, year = {2024}, author = {Ruiz-Rohena, K and Rodriguez-Martínez, M}, title = {ArcaDB: A Disaggregated Query Engine for Heterogenous Computational Environments.}, journal = {Proceedings. IEEE International Conference on Cloud Computing}, volume = {2024}, number = {}, pages = {42-53}, pmid = {39493462}, issn = {2159-6190}, support = {R15 LM012275/LM/NLM NIH HHS/United States ; }, abstract = {Modern enterprises rely on data management systems to collect, store, and analyze vast amounts of data related to their operations. Nowadays, clusters and hardware accelerators (e.g., GPUs, TPUs) have become a necessity to scale with the data processing demands in many applications related to social media, bioinformatics, surveillance systems, remote sensing, and medical informatics. Given this new scenario, the architecture of data analytics engines must evolve to take advantage of these new technological trends. In this paper, we present ArcaDB: a disaggregated query engine that leverages container technology to place operators at compute nodes that fit their performance profile. In ArcaDB, a query plan is dispatched to worker nodes that have different computing characteristics. Each operator is annotated with the preferred type of compute node for execution, and ArcaDB ensures that the operator gets picked up by the appropriate workers. We have implemented a prototype version of ArcaDB using Java, Python, and Docker containers. 
We have also completed a preliminary performance study of this prototype, using images and scientific data. This study shows that ArcaDB can speed up query performance by a factor of 3.5x in comparison with a shared-nothing, symmetric arrangement.}, } @article {pmid39493271, year = {2024}, author = {Liu, X and Chau, KY and Zheng, J and Deng, D and Tang, YM}, title = {Artificial intelligence approach for detecting and classifying abnormal behaviour in older adults using wearable sensors.}, journal = {Journal of rehabilitation and assistive technologies engineering}, volume = {11}, number = {}, pages = {20556683241288459}, pmid = {39493271}, issn = {2055-6683}, abstract = {The global population of older adults has increased, leading to a rising number of older adults in nursing homes without adequate care. This study proposes a smart wearable device for detecting and classifying abnormal behaviour in older adults in nursing homes. The device utilizes artificial intelligence technology to detect abnormal movements through behavioural data collection and target positioning. The intelligent recognition system and hardware sensors were tested using cloud computing and wireless sensor networks (WSNs), comparing their performance with other technologies through simulations. A triple-axis acceleration sensor collected motion behaviour data, and Zigbee enabled the wireless transfer of the sensor data. The Backpropagation (BP) neural network detected and classified abnormal behaviour based on simulated sensor data. The proposed smart wearable device offers indoor positioning, detection, and classification of abnormal behaviour. The embedded intelligent system detects routine motions like walking and abnormal behaviours such as falls. In emergencies, the system alerts healthcare workers for immediate safety measures. This study lays the groundwork for future AI-based technology implementation in nursing homes, advancing care for older adults.}, } @article {pmid39492606, year = {2025}, author = {Nam, SM and Byun, YH and Dho, YS and Park, CK}, title = {Envisioning the Future of the Neurosurgical Operating Room with the Concept of the Medical Metaverse.}, journal = {Journal of Korean Neurosurgical Society}, volume = {68}, number = {2}, pages = {137-149}, pmid = {39492606}, issn = {2005-3711}, support = {//Korea Medical Device Development Fund/ ; //Ministry of Science and ICT/ ; //Ministry of Trade, Industry and Energy/ ; //Ministry of Health and Welfare/ ; RS-2022-00197971//Ministry of Food and Drug Safety/ ; //Korea Health Industry Development Institute/ ; NCC-2411840-1//National Cancer Center/Republic of Korea ; }, abstract = {The medical metaverse can be defined as a virtual spatiotemporal framework wherein higher-dimensional medical information is generated, exchanged, and utilized through communication among medical personnel or patients. This occurs through the integration of cutting-edge technologies such as augmented reality (AR), virtual reality (VR), artificial intelligence (AI), big data, cloud computing, and others. We can envision a future neurosurgical operating room that utilizes such medical metaverse concept such as shared extended reality (AR/VR) of surgical field, AI-powered intraoperative neurophysiological monitoring, and real-time intraoperative tissue diagnosis. 
The future neurosurgical operating room will evolve into a true medical metaverse where participants in surgery can communicate in overlapping virtual layers of surgery, monitoring, and diagnosis.}, } @article {pmid39489808, year = {2024}, author = {Zong, B and Wu, S and Yang, Y and Li, Q and Tao, T and Mao, S}, title = {Smart Gas Sensors: Recent Developments and Future Prospective.}, journal = {Nano-micro letters}, volume = {17}, number = {1}, pages = {54}, pmid = {39489808}, issn = {2150-5551}, abstract = {Gas sensors are an indispensable part of modern society with wide applications in environmental monitoring, healthcare, food industry, public safety, etc. With the development of sensor technology, wireless communication, smart monitoring terminals, cloud storage/computing technology, and artificial intelligence, smart gas sensors represent the future of gas sensing due to their merits of real-time multifunctional monitoring, early warning function, and intelligent and automated features. Various electronic and optoelectronic gas sensors have been developed for high-performance smart gas analysis. With the development of smart terminals and the maturity of integrated technology, flexible and wearable gas sensors play an increasing role in gas analysis. This review highlights recent advances of smart gas sensors in diverse applications. The structural components and fundamental principles of electronic and optoelectronic gas sensors are described, and flexible and wearable gas sensor devices are highlighted. Moreover, sensor arrays with artificial intelligence algorithms and smart gas sensors in the "Internet of Things" paradigm are introduced. Finally, the challenges and perspectives of smart gas sensors are discussed regarding the future need of gas sensors for smart city and healthy living.}, } @article {pmid39472624, year = {2024}, author = {Cavus, N and Goksu, M and Oktekin, B}, title = {Real-time fake news detection in online social networks: FANDC Cloud-based system.}, journal = {Scientific reports}, volume = {14}, number = {1}, pages = {25954}, pmid = {39472624}, issn = {2045-2322}, abstract = {Social networks have become a common way for people to communicate with each other and share ideas, thanks to their fast information-sharing features. But fake news spread on social networks can cause many negative consequences by affecting people's daily lives. However, the literature lacks online and real-time fake news detection systems. This study aims to fill this gap in the literature and to handle the fake news detection problem with a system called FANDC, based on cloud computing, to cope with fake news in seven different categories, and to solve the real-time fake news detection problems. The system was developed using the CRISP-DM methodology with a hybrid approach. The BERT algorithm was used in the system running on the cloud to avoid possible cyber threats, with a dataset of approximately 99 million records built from the COVID-19-TweetIDs GitHub repository. It was trained in two periods, reaching 100% training accuracy during the modeling phase. In experiments, the FANDC system performed real-time detection of fake news at 99% accuracy, whereas success rates reported in previous studies were around 90%. 
We hope that the developed system will greatly assist social network users in detecting fake news in real-time.}, } @article {pmid39472447, year = {2024}, author = {Wang, J and Choi, DH and LaRue, E and Atkins, JW and Foster, JR and Matthes, JH and Fahey, RT and Fei, S and Hardiman, BS}, title = {NEON-SD: A 30-m Structural Diversity Product Derived from the NEON Discrete-Return LiDAR Point Cloud.}, journal = {Scientific data}, volume = {11}, number = {1}, pages = {1174}, pmid = {39472447}, issn = {2052-4463}, support = {1926538//National Science Foundation (NSF)/ ; 1926454//National Science Foundation (NSF)/ ; 1926442//National Science Foundation (NSF)/ ; 1926538//National Science Foundation (NSF)/ ; 2023-68012-38992//United States Department of Agriculture | National Institute of Food and Agriculture (NIFA)/ ; 2023-68012-38992//United States Department of Agriculture | National Institute of Food and Agriculture (NIFA)/ ; }, abstract = {Structural diversity (SD) characterizes the volume and physical arrangement of biotic components in an ecosystem which control critical ecosystem functions and processes. LiDAR data provides detailed 3-D spatial position information of components and has been widely used to calculate SD. However, the intensive computation of SD metrics from extensive LiDAR datasets is time-consuming and challenging for researchers who lack access to high-performance computing resources. Moreover, a lack of understanding of LiDAR data and algorithms could lead to inconsistent SD metrics. Here, we developed a SD product using the Discrete-Return LiDAR Point Cloud from the NEON Aerial Observation Platform. This product provides SD metrics detailing height, density, openness, and complexity at a spatial resolution of 30 m, aligned to the Landsat grids, for 211 site-years for 45 Terrestrial NEON sites from 2013 to 2022. To accommodate various ecosystems with different understory heights, it includes three different cut-off heights (0.5 m, 2 m, and 5 m). This structural diversity product can enable various applications such as ecosystem productivity estimation and disturbance monitoring.}, } @article {pmid39471145, year = {2024}, author = {Khan, S and Khan, S and Waheed, A and Mehmood, G and Zareei, M and Alanazi, F}, title = {An optimized dynamic attribute-based searchable encryption scheme.}, journal = {PloS one}, volume = {19}, number = {10}, pages = {e0268803}, pmid = {39471145}, issn = {1932-6203}, mesh = {*Computer Security ; *Cloud Computing ; *Algorithms ; Humans ; }, abstract = {Cloud computing liberates enterprises and organizations from expensive data centers and complex IT infrastructures by offering the on-demand availability of vast storage and computing power over the internet. Among the many service models in practice, the public cloud is popular among individuals and organizations for its operational cost savings, flexibility, and better customer support. Nonetheless, this shift in the trusted domain from the concerned users to third-party service providers raises many privacy and security concerns. These concerns hinder its wide adoption for many potential applications. Furthermore, classical encryption techniques render the encrypted data useless for many of its valuable operations. The combined concept of attribute-based encryption (ABE) and searchable encryption (SE), commonly known as attribute-based keyword searching (ABKS), emerges as a promising technology for addressing these concerns. 
However, most of the contemporary ABE-based keyword searching schemes incorporate costly pairing and computationally heavy secret sharing mechanisms for their realization. Our proposed scheme avoids the expensive bilinear pairing operation during the searching operation and costly Lagrange interpolation for secret reconstruction. Moreover, our proposed scheme enables updating the access control policy without entirely re-encrypting the ciphertext. The security of our scheme in the selective-set model is proved under the Decisional Bilinear Diffie-Hellman (DBDH) assumption, and the scheme is collision-free. Finally, the experimental results and performance evaluation demonstrate its communication and overall efficiency.}, } @article {pmid39460192, year = {2024}, author = {Xiong, G and Guo, J}, title = {Contribution-Based Resource Allocation for Effective Federated Learning in UAV-Assisted Edge Networks.}, journal = {Sensors (Basel, Switzerland)}, volume = {24}, number = {20}, pages = {}, pmid = {39460192}, issn = {1424-8220}, abstract = {This paper considers UAVs as edge computing nodes and investigates a novel network resource allocation method for federated learning within a three-layer wireless network architecture containing cloud, edges (UAVs), and clients. To address the issue of fair bandwidth resource allocation among clients participating in federated learning, a contribution calculation strategy based on the Shapley value (SV), used as the weight for model aggregation, is proposed. On this basis, a client selection and wireless resource allocation method based on model contribution is further designed. By reducing the training and aggregation frequency of the low-contribution clients during the asynchronous aggregation phase, the limited bandwidth resources are allocated to high-contribution clients, thus improving the convergence speed and accuracy of the global model. Simulation experiments demonstrate that the proposed method can significantly reduce the system delay and total energy consumption with gains between 15% and 50% while also improving the final accuracy of the global model by 0.3% and 2% from short-term and long-term perspectives, respectively.}, } @article {pmid39460169, year = {2024}, author = {Hong, S and Park, S and Youn, H and Lee, J and Kwon, S}, title = {Implementation of Smart Farm Systems Based on Fog Computing in Artificial Intelligence of Things Environments.}, journal = {Sensors (Basel, Switzerland)}, volume = {24}, number = {20}, pages = {}, pmid = {39460169}, issn = {1424-8220}, support = {TP-2023-RS-2023-00258639//Ministry of Science and ICT/ ; 2021//Kwangwoon University/ ; }, abstract = {Cloud computing has recently gained widespread attention owing to its use in applications involving the Internet of Things (IoT). However, the transmission of massive volumes of data to a cloud server often results in overhead. Fog computing has emerged as a viable solution to address this issue. This study implements an Artificial Intelligence of Things (AIoT) system based on fog computing on a smart farm. Three experiments are conducted to evaluate the performance of the AIoT system. First, network traffic volumes between systems employing and not employing fog computing are compared. Second, the performance of the communication protocols commonly used in IoT applications, namely the hypertext transport protocol (HTTP), the message queuing telemetry transport protocol (MQTT), and the constrained application protocol (CoAP), is assessed. 
Finally, a convolutional neural network-based algorithm is introduced to determine the maturity level from coffee tree images. Experimental data are collected over ten days from a coffee tree farm in the Republic of Korea. Notably, the fog computing system demonstrates a 26% reduction in the cumulative data volume compared with a non-fog system. MQTT exhibits stable results in terms of the data volume and loss rate. Additionally, the maturity level determination algorithm performed on coffee fruits provides reliable results.}, } @article {pmid39460161, year = {2024}, author = {Molani, A and Pennati, F and Ravazzani, S and Scarpellini, A and Storti, FM and Vegetali, G and Paganelli, C and Aliverti, A}, title = {Advances in Portable Optical Microscopy Using Cloud Technologies and Artificial Intelligence for Medical Applications.}, journal = {Sensors (Basel, Switzerland)}, volume = {24}, number = {20}, pages = {}, pmid = {39460161}, issn = {1424-8220}, mesh = {*Microscopy/instrumentation/methods ; *Cloud Computing ; *Artificial Intelligence ; *Image Processing, Computer-Assisted/methods ; Humans ; Internet of Things ; Algorithms ; Deep Learning ; Smartphone ; }, abstract = {The need for faster and more accessible alternatives to laboratory microscopy is driving many innovations throughout the image and data acquisition chain in the biomedical field. Benchtop microscopes are bulky, lack communications capabilities, and require trained personnel for analysis. New technologies, such as compact 3D-printed devices integrated with the Internet of Things (IoT) for data sharing and cloud computing, as well as automated image processing using deep learning algorithms, can address these limitations and enhance the conventional imaging workflow. This review reports on recent advancements in microscope miniaturization, with a focus on emerging technologies such as photoacoustic microscopy and more established approaches like smartphone-based microscopy. The potential applications of IoT in microscopy are examined in detail. Furthermore, this review discusses the evolution of image processing in microscopy, transitioning from traditional to deep learning methods that facilitate image enhancement and data interpretation. Despite numerous advancements in the field, there is a noticeable lack of studies that holistically address the entire microscopy acquisition chain. This review aims to highlight the potential of IoT and artificial intelligence (AI) in combination with portable microscopy, emphasizing the importance of a comprehensive approach to the microscopy acquisition chain, from portability to image analysis.}, } @article {pmid39452059, year = {2024}, author = {Eliwa, EHI and Mohamed El Koshiry, A and Abd El-Hafeez, T and Omar, A}, title = {Secure and Transparent Lung and Colon Cancer Classification Using Blockchain and Microsoft Azure.}, journal = {Advances in respiratory medicine}, volume = {92}, number = {5}, pages = {395-420}, pmid = {39452059}, issn = {2543-6031}, support = {[Project No.: KFU241572]//King Faisal University/ ; }, mesh = {Humans ; *Lung Neoplasms/diagnostic imaging/classification/pathology ; *Colonic Neoplasms/diagnostic imaging/classification/pathology ; *Computer Security ; Blockchain ; Machine Learning ; Cloud Computing ; }, abstract = {BACKGROUND: The global healthcare system faces challenges in diagnosing and managing lung and colon cancers, which are significant health burdens. 
Traditional diagnostic methods are inefficient and prone to errors, while data privacy and security concerns persist.

OBJECTIVE: This study aims to develop a secure and transparent framework for remote consultation and classification of lung and colon cancer, leveraging blockchain technology and Microsoft Azure cloud services. Dataset and Features: The framework utilizes the LC25000 dataset, containing 25,000 histopathological images, for training and evaluating advanced machine learning models. Key features include secure data upload, anonymization, encryption, and controlled access via blockchain and Azure services.
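
As a hedged illustration of preparing the LC25000 images at the split ratios evaluated later in the abstract (70-30, 80-20, 90-10), one might write the following; the on-disk folder layout and file extension are assumptions about the public dataset, not details from the paper.

```python
from pathlib import Path

from sklearn.model_selection import train_test_split

DATA_DIR = Path("lung_colon_image_set")  # assumed local copy of LC25000

# Collect (path, label) pairs; labels taken from class subfolder names.
samples = [(str(p), p.parent.name) for p in DATA_DIR.rglob("*.jpeg")]
paths, labels = map(list, zip(*samples))

for test_size in (0.30, 0.20, 0.10):  # the 70-30, 80-20, and 90-10 splits
    x_train, x_test, y_train, y_test = train_test_split(
        paths, labels, test_size=test_size, stratify=labels, random_state=42
    )
    print(f"{1 - test_size:.0%} train / {test_size:.0%} test: "
          f"{len(x_train)} vs {len(x_test)} images")
```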

METHODS: The proposed framework integrates Microsoft Azure's cloud services with a permissioned blockchain network. Patients upload CT scans through a mobile app, which are then preprocessed, anonymized, and stored securely in Azure Blob Storage. Blockchain smart contracts manage data access, ensuring only authorized specialists can retrieve and analyze the scans. Azure Machine Learning is used to train and deploy state-of-the-art machine learning models for cancer classification. Evaluation Metrics: The framework's performance is evaluated using metrics such as accuracy, precision, recall, and F1-score, demonstrating the effectiveness of the integrated approach in enhancing diagnostic accuracy and data security.
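
The upload path described above (anonymize, encrypt, store in Azure Blob Storage) can be sketched with the azure-storage-blob SDK. The container name, connection-string configuration, and use of Fernet encryption are illustrative assumptions; the paper's actual key management and smart-contract gating are not shown.

```python
import os
import uuid

from azure.storage.blob import BlobServiceClient
from cryptography.fernet import Fernet


def upload_scan(scan_path: str, key: bytes) -> str:
    """Anonymize (random blob name instead of patient identifiers),
    encrypt, and upload one CT scan; returns the blob name that an
    access-control layer could later reference."""
    with open(scan_path, "rb") as f:
        ciphertext = Fernet(key).encrypt(f.read())

    blob_name = f"{uuid.uuid4()}.enc"  # no patient-identifying information
    service = BlobServiceClient.from_connection_string(
        os.environ["AZURE_STORAGE_CONNECTION_STRING"]
    )
    service.get_blob_client(container="ct-scans", blob=blob_name).upload_blob(
        ciphertext
    )
    return blob_name
```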

RESULTS: The proposed framework achieves an impressive accuracy of 100% for lung and colon cancer classification using DenseNet, ResNet50, and MobileNet models with different split ratios (70-30, 80-20, 90-10). The F1-score and k-fold cross-validation accuracy (5-fold and 10-fold) also demonstrate exceptional performance, with values exceeding 99.9%. Real-time notifications and secure remote consultations enhance the efficiency and transparency of the diagnostic process, contributing to better patient outcomes and streamlined cancer care management.}, } @article {pmid39443503, year = {2024}, author = {Murugesan, GK and McCrumb, D and Aboian, M and Verma, T and Soni, R and Memon, F and Farahani, K and Pei, L and Wagner, U and Fedorov, AY and Clunie, D and Moore, S and Van Oss, J}, title = {AI-Generated Annotations Dataset for Diverse Cancer Radiology Collections in NCI Image Data Commons.}, journal = {Scientific data}, volume = {11}, number = {1}, pages = {1165}, pmid = {39443503}, issn = {2052-4463}, support = {75N91019D00024/CA/NCI NIH HHS/United States ; U01 CA142565/CA/NCI NIH HHS/United States ; U01 CA151261/CA/NCI NIH HHS/United States ; UL1 TR001863/TR/NCATS NIH HHS/United States ; }, mesh = {Humans ; *Neoplasms/diagnostic imaging ; *National Cancer Institute (U.S.) ; United States ; Tomography, X-Ray Computed ; Magnetic Resonance Imaging ; Artificial Intelligence ; Positron-Emission Tomography ; Cloud Computing ; }, abstract = {The National Cancer Institute (NCI) Image Data Commons (IDC) offers publicly available cancer radiology collections for cloud computing, crucial for developing advanced imaging tools and algorithms. Despite their potential, these collections are minimally annotated; only 4% of DICOM studies in collections considered in the project had existing segmentation annotations. This project increases the quantity of segmentations in various IDC collections. We produced high-quality, AI-generated imaging annotations dataset of tissues, organs, and/or cancers for 11 distinct IDC image collections. These collections contain images from a variety of modalities, including computed tomography (CT), magnetic resonance imaging (MRI), and positron emission tomography (PET). The collections cover various body parts, such as the chest, breast, kidneys, prostate, and liver. A portion of the AI annotations were reviewed and corrected by a radiologist to assess the performance of the AI models. Both the AI's and the radiologist's annotations were encoded in conformance to the Digital Imaging and Communications in Medicine (DICOM) standard, allowing for seamless integration into the IDC collections as third-party analysis collections. All the models, images and annotations are publicly accessible.}, } @article {pmid39441878, year = {2024}, author = {Zhang, XY and Hong, JW}, title = {A dynamic authorizable ciphertext image retrieval algorithm based on security neural network inference.}, journal = {PloS one}, volume = {19}, number = {10}, pages = {e0309947}, pmid = {39441878}, issn = {1932-6203}, mesh = {*Computer Security ; *Algorithms ; *Neural Networks, Computer ; Information Storage and Retrieval/methods ; Image Processing, Computer-Assisted/methods ; Humans ; Cloud Computing ; }, abstract = {In this paper, we propose a dynamic authorizable ciphertext image retrieval scheme based on secure neural network inference that effectively enhances the security of image retrieval while preserving privacy. 
To ensure the privacy of the original image and enable feature extraction without decryption operations, we employ a secure neural network for feature extraction during the index construction stage of encrypted images. Additionally, we introduce a dynamic authenticatable ciphertext retrieval algorithm to enhance system flexibility and security by enabling users to quickly and flexibly retrieve authorized images. Experimental results demonstrate that our scheme guarantees image data privacy throughout the entire process from upload to retrieval compared to similar literature schemes. Furthermore, our scheme ensures data availability while maintaining security, allowing users to conveniently perform image retrieval operations. Although overall efficiency may not be optimal according to experimental results, our solution satisfies practical application needs in cloud computing environments by providing a secure and usable image retrieval solution.}, } @article {pmid39438719, year = {2024}, author = {Nino Barreat, JG and Katzourakis, A}, title = {Deep mining reveals the diversity of endogenous viral elements in vertebrate genomes.}, journal = {Nature microbiology}, volume = {9}, number = {11}, pages = {3013-3024}, pmid = {39438719}, issn = {2058-5276}, support = {10010623//EC | EU Framework Programme for Research and Innovation H2020 | H2020 Priority Excellent Science | H2020 European Research Council (H2020 Excellent Science - European Research Council)/ ; }, mesh = {Animals ; *Vertebrates/virology ; *Phylogeny ; *Evolution, Molecular ; Humans ; Genome/genetics ; Genetic Variation ; Host Specificity ; Endogenous Retroviruses/genetics/classification ; }, abstract = {Integration of viruses into host genomes can give rise to endogenous viral elements (EVEs), which provide insights into viral diversity, host range and evolution. A systematic search for EVEs is becoming computationally challenging given the available genomic data. We used a cloud-computing approach to perform a comprehensive search for EVEs in the kingdoms Shotokuvirae and Orthornavirae across vertebrates. We identified 2,040 EVEs in 295 vertebrate genomes and provide evidence for EVEs belonging to the families Chuviridae, Paramyxoviridae, Nairoviridae and Benyviridae. We also find an EVE from the Hepacivirus genus of flaviviruses with orthology across murine rodents. In addition, our analyses revealed that reptarenaviruses and filoviruses probably acquired their glycoprotein ectodomains three times independently from retroviral elements. Taken together, these findings encourage the addition of 4 virus families and the Hepacivirus genus to the growing virus fossil record of vertebrates, providing key insights into their natural history and evolution.}, } @article {pmid39438519, year = {2024}, author = {Singh, P and Sagar, S and Singh, S and Alshahrani, HM and Getahun, M and Soufiene, BO}, title = {Blockchain-enabled verification of medical records using soul-bound tokens and cloud computing.}, journal = {Scientific reports}, volume = {14}, number = {1}, pages = {24830}, pmid = {39438519}, issn = {2045-2322}, abstract = {The crucial and costly process of verifying medical documents frequently depends on centralized databases. Nevertheless, manual document verification wastes a great deal of time and energy. The application of Blockchain technology could potentially alleviate the problem by reducing fraud and increasing efficiency.
Non-transferable Soul-bound tokens (SBTs) can be a safe and unbreakable way to authenticate medical records by generating encrypted code, which allows the user to authenticate a portion of data. Within the paper, we provide a blockchain-based SBT-based automatic mechanism for authentication and verification of records. Soul-bound tokens generate a decentralized, immutable identity or credential system that is tied to a record. Through cloud computing, the system can reduce the verification time by accessing a decentralized database. Blockchain systems can lower platform costs and determine the optimal allocation of resources across a dispersed network by utilizing deep learning algorithms. Two advantages of utilizing blockchain technology are less fraud and increased efficiency. SBTs and cloud computing enable the procedure to be expedited and decentralized databases to be readily available. The suggested system's scalability and potential uses in other industries may be the subject of future research.}, } @article {pmid39433933, year = {2024}, author = {Alsadie, D and Alsulami, M}, title = {Enhancing workflow efficiency with a modified Firefly Algorithm for hybrid cloud edge environments.}, journal = {Scientific reports}, volume = {14}, number = {1}, pages = {24675}, pmid = {39433933}, issn = {2045-2322}, abstract = {Efficient scheduling of scientific workflows in hybrid cloud-edge environments is crucial for optimizing resource utilization and minimizing completion time. In this study, we evaluate various scheduling algorithms, emphasizing the Modified Firefly Optimization Algorithm (ModFOA) and comparing it with established methods such as Ant Colony Optimization (ACO), Genetic Algorithm (GA), and Particle Swarm Optimization (PSO). We investigate key performance metrics, including makespan, resource utilization, and energy consumption, across both cloud and edge configurations. Scientific workflows often involve complex tasks with dependencies, which can challenge traditional scheduling algorithms. While existing methods show promise, they may not fully address the unique demands of hybrid cloud-edge environments, potentially leading to suboptimal outcomes. Our proposed ModFOA integrates cloud and edge computing resources, offering an effective solution for scheduling workflows in these hybrid environments. Through comparative analysis, ModFOA demonstrates improved performance in reducing makespan and completion times, while maintaining competitive resource utilization and energy efficiency. This study highlights the importance of incorporating cloud-edge integration in scheduling algorithms and showcases ModFOA's potential to enhance workflow efficiency and resource management across hybrid environments. 
Future research should focus on refining ModFOA's parameters and validating its effectiveness in practical hybrid cloud-edge scenarios.}, } @article {pmid39431777, year = {2024}, author = {Bylaska, EJ and Panyala, A and Bauman, NP and Peng, B and Pathak, H and Mejia-Rodriguez, D and Govind, N and Williams-Young, DB and Aprà, E and Bagusetty, A and Mutlu, E and Jackson, KA and Baruah, T and Yamamoto, Y and Pederson, MR and Withanage, KPK and Pedroza-Montero, JN and Bilbrey, JA and Choudhury, S and Firoz, J and Herman, KM and Xantheas, SS and Rigor, P and Vila, FD and Rehr, JJ and Fung, M and Grofe, A and Johnston, C and Baker, N and Kaneko, K and Liu, H and Kowalski, K}, title = {Electronic structure simulations in the cloud computing environment.}, journal = {The Journal of chemical physics}, volume = {161}, number = {15}, pages = {}, doi = {10.1063/5.0226437}, pmid = {39431777}, issn = {1089-7690}, abstract = {The transformative impact of modern computational paradigms and technologies, such as high-performance computing (HPC), quantum computing, and cloud computing, has opened up profound new opportunities for scientific simulations. Scalable computational chemistry is one beneficiary of this technological progress. The main focus of this paper is on the performance of various quantum chemical formulations, ranging from low-order methods to high-accuracy approaches, implemented in different computational chemistry packages and libraries, such as NWChem, NWChemEx, Scalable Predictive Methods for Excitations and Correlated Phenomena, ExaChem, and Fermi-Löwdin orbital self-interaction correction on Azure Quantum Elements, Microsoft's cloud services platform for scientific discovery. We pay particular attention to the intricate workflows for performing complex chemistry simulations, associated data curation, and mechanisms for accuracy assessment, which is demonstrated with the Arrows automated workflow for high throughput simulations. Finally, we provide a perspective on the role of cloud computing in supporting the mission of leadership computational facilities.}, } @article {pmid39416276, year = {2024}, author = {Gasset, A and Van Wijngaarden, J and Mirabent, F and Sales-Vallverdú, A and Garcia-Ortega, X and Montesinos-Seguí, JL and Manzano, T and Valero, F}, title = {Continuous Process Verification 4.0 application in upstream: adaptiveness implementation managed by AI in the hypoxic bioprocess of the Pichia pastoris cell factory.}, journal = {Frontiers in bioengineering and biotechnology}, volume = {12}, number = {}, pages = {1439638}, pmid = {39416276}, issn = {2296-4185}, abstract = {The experimental approach developed in this research demonstrated how the cloud, the Internet of Things (IoT), edge computing, and Artificial Intelligence (AI), considered key technologies in Industry 4.0, provide the expected horizon for adaptive vision in Continued Process Verification (CPV), the final stage of Process Validation (PV). Pichia pastoris producing Candida rugosa lipase 1 under the regulation of the constitutive GAP promoter was selected as an experimental bioprocess. The bioprocess worked under hypoxic conditions in carbon-limited fed-batch cultures through a physiological control based on the respiratory quotient (RQ). In this novel bioprocess, a digital twin (DT) was built and successfully tested. The implementation of online sensors worked as a bridge between the microorganism and AI models, to provide predictions from the edge and the cloud. 
AI models emulated the metabolism of Pichia based on critical process parameters and actionable factors to achieve the expected quality attributes. This innovative AI-aided Adaptive-Proportional Control strategy (AI-APC) improved reproducibility compared with a Manual-Heuristic Control strategy (MHC), and showed better performance than the Boolean-Logic-Controller (BLC) tested. The accuracy of the AI-APC, indicated by a Mean Relative Error (MRE) below 4%, was better than that obtained for the MHC (10%) and the BLC (5%). Moreover, in terms of precision, the same trend was observed when comparing the Root Mean Square Deviation (RMSD) values, which became lower as the complexity of the controller increased. The successful automatic real-time control of the bioprocess orchestrated by AI models proved the 4.0 capabilities brought by the adaptive concept and its validity in biopharmaceutical upstream operations.}, } @article {pmid39411512, year = {2024}, author = {Schlegel, BT and Morikone, M and Mu, F and Tang, WY and Kohanbash, G and Rajasundaram, D}, title = {bcRflow: a Nextflow pipeline for characterizing B cell receptor repertoires from non-targeted transcriptomic data.}, journal = {NAR genomics and bioinformatics}, volume = {6}, number = {4}, pages = {lqae137}, pmid = {39411512}, issn = {2631-9268}, support = {S10 OD028483/OD/NIH HHS/United States ; }, abstract = {B cells play a critical role in the adaptive recognition of foreign antigens through diverse receptor generation. While targeted immune sequencing methods are commonly used to profile B cell receptors (BCRs), they have limitations in cost and tissue availability. Analyzing B cell receptor profiling from non-targeted transcriptomics data is a promising alternative, but a systematic pipeline integrating tools for accurate immune repertoire extraction is lacking. Here, we present bcRflow, a Nextflow pipeline designed to characterize BCR repertoires from non-targeted transcriptomics data, with functional modules for alignment, processing, and visualization. bcRflow is a comprehensive, reproducible, and scalable pipeline that can run on high-performance computing clusters, cloud-based computing resources like Amazon Web Services (AWS), the Open OnDemand framework, or even local desktops. bcRflow utilizes institutional configurations provided by nf-core to ensure maximum portability and accessibility. To demonstrate the functionality of the bcRflow pipeline, we analyzed a public dataset of bulk transcriptomic samples from COVID-19 patients and healthy controls. We have shown that bcRflow streamlines the analysis of BCR repertoires from non-targeted transcriptomics data, providing valuable insights into the B cell immune response for biological and clinical research. bcRflow is available at https://github.com/Bioinformatics-Core-at-Childrens/bcRflow.}, } @article {pmid39411450, year = {2024}, author = {Karkar, S and Sharma, A and Herrmann, C and Blum, Y and Richard, M}, title = {DECOMICS, a shiny application for unsupervised cell type deconvolution and biological interpretation of bulk omic data.}, journal = {Bioinformatics advances}, volume = {4}, number = {1}, pages = {vbae136}, pmid = {39411450}, issn = {2635-0041}, abstract = {SUMMARY: Unsupervised deconvolution algorithms are often used to estimate cell composition from bulk tissue samples. However, applying cell-type deconvolution and interpreting the results remain a challenge, even more so without prior training in bioinformatics.
Here, we propose a tool for estimating and identifying cell type composition from bulk transcriptomes or methylomes. DECOMICS is a shiny-web application dedicated to unsupervised deconvolution approaches of bulk omic data. It provides (i) a variety of existing algorithms to perform deconvolution on the gene expression or methylation-level matrix, (ii) an enrichment analysis module to aid biological interpretation of the deconvolved components, and (iii) some visualization tools. Input data can be uploaded in CSV format and preprocessed in the web application (normalization, transformation, and feature selection). The results of the deconvolution, enrichment, and visualization processes can be downloaded.
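
For readers unfamiliar with the kind of unsupervised deconvolution DECOMICS wraps, the sketch below illustrates the idea with non-negative matrix factorization, one common approach. scikit-learn and the synthetic matrix are assumptions for illustration, not DECOMICS's actual backend (which is an R application).

```python
import numpy as np
from sklearn.decomposition import NMF

rng = np.random.default_rng(1)
# Hypothetical bulk expression matrix: 1000 genes x 40 samples (non-negative)
X = rng.gamma(shape=2.0, scale=1.0, size=(1000, 40))

# Unsupervised deconvolution into k latent components (candidate cell types)
k = 4
model = NMF(n_components=k, init='nndsvda', max_iter=500, random_state=0)
signatures = model.fit_transform(X)   # genes x k: component expression profiles
exposures = model.components_         # k x samples: per-sample component weights

# Normalize exposures so each sample's weights sum to 1 (proportion estimates)
proportions = exposures / exposures.sum(axis=0, keepdims=True)
```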

DECOMICS is an R-shiny web application that can be launched (i) directly from a local R session using the R package available here: https://gitlab.in2p3.fr/Magali.Richard/decomics (either by installing it locally or via a virtual machine and a Docker image that we provide); or (ii) in the Biosphere-IFB Clouds Federation for Life Science, a multi-cloud environment scalable for high-performance computing: https://biosphere.france-bioinformatique.fr/catalogue/appliance/193/.}, } @article {pmid39410638, year = {2024}, author = {Hsu, WS and Liu, GT and Chen, SJ and Wei, SY and Wang, WH}, title = {An Automated Clubbed Fingers Detection System Based on YOLOv8 and U-Net: A Tool for Early Prediction of Lung and Cardiovascular Diseases.}, journal = {Diagnostics (Basel, Switzerland)}, volume = {14}, number = {19}, pages = {}, pmid = {39410638}, issn = {2075-4418}, abstract = {Background/Objectives: Lung and cardiovascular diseases are leading causes of mortality worldwide, yet early detection remains challenging due to the subtle symptoms. Digital clubbing, characterized by the bulbous enlargement of the fingertips, serves as an early indicator of these diseases. This study aims to develop an automated system for detecting digital clubbing using deep-learning models for real-time monitoring and early intervention. Methods: The proposed system utilizes the YOLOv8 model for object detection and U-Net for image segmentation, integrated with the ESP32-CAM development board to capture and analyze finger images. The severity of digital clubbing is determined using a custom algorithm based on the Lovibond angle theory, categorizing the condition into normal, mild, moderate, and severe. The system was evaluated using 1768 images and achieved cloud-based and real-time processing capabilities. Results: The system demonstrated high accuracy (98.34%) in real-time detection with precision (98.22%), sensitivity (99.48%), and specificity (98.22%). Cloud-based processing achieved slightly lower but robust results, with an accuracy of 96.38%. The average processing time was 0.15 s per image, showcasing its real-time potential. Conclusions: This automated system provides a scalable and cost-effective solution for the early detection of digital clubbing, enabling timely intervention for lung and cardiovascular diseases. Its high accuracy and real-time capabilities make it suitable for both clinical and home-based health monitoring.}, } @article {pmid39409439, year = {2024}, author = {Ruf, B and Weinmann, M and Hinz, S}, title = {FaSS-MVS: Fast Multi-View Stereo with Surface-Aware Semi-Global Matching from UAV-Borne Monocular Imagery.}, journal = {Sensors (Basel, Switzerland)}, volume = {24}, number = {19}, pages = {}, pmid = {39409439}, issn = {1424-8220}, abstract = {With FaSS-MVS, we present a fast, surface-aware semi-global optimization approach for multi-view stereo that allows for rapid depth and normal map estimation from monocular aerial video data captured by unmanned aerial vehicles (UAVs). The data estimated by FaSS-MVS, in turn, facilitate online 3D mapping, meaning that a 3D map of the scene is immediately and incrementally generated as the image data are acquired or being received. FaSS-MVS is composed of a hierarchical processing scheme in which depth and normal data, as well as corresponding confidence scores, are estimated in a coarse-to-fine manner, allowing efficient processing of large scene depths, such as those inherent in oblique images acquired by UAVs flying at low altitudes. 
The actual depth estimation uses a plane-sweep algorithm for dense multi-image matching to produce depth hypotheses from which the final depth map is extracted by means of a surface-aware semi-global optimization, reducing the fronto-parallel bias of Semi-Global Matching (SGM). Given the estimated depth map, the pixel-wise surface normal information is then computed by reprojecting the depth map into a point cloud and computing the normal vectors within a confined local neighborhood. In a thorough quantitative and ablative study, we show that the accuracy of the 3D information computed by FaSS-MVS is close to that of state-of-the-art offline multi-view stereo approaches, with the error not even an order of magnitude higher than that of COLMAP. At the same time, however, the average runtime of FaSS-MVS for estimating a single depth and normal map is less than 14% of that of COLMAP, allowing us to perform online and incremental processing of full HD images at 1-2 Hz.}, } @article {pmid39409330, year = {2024}, author = {Lin, HY and Chen, PR}, title = {Revocable and Fog-Enabled Proxy Re-Encryption Scheme for IoT Environments.}, journal = {Sensors (Basel, Switzerland)}, volume = {24}, number = {19}, pages = {}, pmid = {39409330}, issn = {1424-8220}, abstract = {As technology advances rapidly, a diverse array of Internet of Things (IoT) devices finds widespread application across numerous fields. The intelligent nature of these devices not only gives people more convenience, but also introduces new challenges, especially for security when transmitting data in fog-based cloud environments. In fog computing environments, data need to be transmitted across multiple devices, increasing the risk of data being intercepted or tampered with during transmission. To securely share cloud ciphertexts, a so-called proxy re-encryption approach is a commonly adopted solution. Without decrypting the original ciphertext, such a mechanism permits a ciphertext intended for user A to be easily converted into one intended for user B. However, revoking the decryption privilege of data users usually relies on the system authority maintaining a user revocation list, which inevitably increases the storage space. In this research, the authors come up with a fog-based proxy re-encryption system with revocable identity. Without maintaining the traditional user revocation list, the proposed scheme introduces a time-updated key mechanism. The time-update key could be viewed as a partial private key and should be renewed with different time periods. A revoked user is unable to obtain the renewed time-update key and hence cannot share or decrypt cloud ciphertexts. We formally demonstrate that the introduced scheme satisfies the security of indistinguishability against adaptively chosen identity and chosen plaintext attacks (IND-PrID-CPA) assuming the hardness of the Decisional Bilinear Diffie-Hellman (DBDH) problem in the random oracle model.
Furthermore, compared with similar systems, the proposed one also has lower computational complexity as a whole.}, } @article {pmid39402335, year = {2024}, author = {Moriya, T and Yamada, Y and Yamamoto, M and Senda, T}, title = {GoToCloud optimization of cloud computing environment for accelerating cryo-EM structure-based drug design.}, journal = {Communications biology}, volume = {7}, number = {1}, pages = {1320}, pmid = {39402335}, issn = {2399-3642}, support = {JP23ama121001//Japan Agency for Medical Research and Development (AMED)/ ; JP20K15735//MEXT | Japan Society for the Promotion of Science (JSPS)/ ; JP23H02427//MEXT | Japan Society for the Promotion of Science (JSPS)/ ; }, mesh = {*Cryoelectron Microscopy/methods ; *Cloud Computing ; *Drug Design ; }, abstract = {Cryogenic electron microscopy (Cryo-EM) is a widely used technique for visualizing the 3D structures of many drug design targets, including membrane proteins, at atomic resolution. However, the necessary throughput for structure-based drug design (SBDD) is not yet achieved. Currently, data analysis is a major bottleneck due to the rapid advancements in detector technology and image acquisition methods. Here we show "GoToCloud", a cloud-computing-based platform for advanced data analysis and data management in Cryo-EM. With GoToCloud, it is possible to optimize computing resources and reduce costs by selecting the most appropriate parallel processing settings for each processing step. Our benchmark tests on GoToCloud demonstrate that parallel computing settings, including the choice of computational hardware, as well as a required target resolution have significant impacts on the processing time and cost performance. Through this optimization of a cloud computing environment, GoToCloud emerges as a promising platform for the acceleration of Cryo-EM SBDD.}, } @article {pmid39396423, year = {2024}, author = {Du, X and Novoa-Laurentiev, J and Plasek, JM and Chuang, YW and Wang, L and Marshall, GA and Mueller, SK and Chang, F and Datta, S and Paek, H and Lin, B and Wei, Q and Wang, X and Wang, J and Ding, H and Manion, FJ and Du, J and Bates, DW and Zhou, L}, title = {Enhancing early detection of cognitive decline in the elderly: a comparative study utilizing large language models in clinical notes.}, journal = {EBioMedicine}, volume = {109}, number = {}, pages = {105401}, pmid = {39396423}, issn = {2352-3964}, support = {R01 AG080429/AG/NIA NIH HHS/United States ; R44 AG081006/AG/NIA NIH HHS/United States ; }, mesh = {Humans ; *Cognitive Dysfunction/diagnosis ; Aged ; Female ; Male ; *Electronic Health Records ; Early Diagnosis ; Aged, 80 and over ; Middle Aged ; Natural Language Processing ; }, abstract = {BACKGROUND: Large language models (LLMs) have shown promising performance in various healthcare domains, but their effectiveness in identifying specific clinical conditions in real medical records is less explored. This study evaluates LLMs for detecting signs of cognitive decline in real electronic health record (EHR) clinical notes, comparing their error profiles with traditional models. The insights gained will inform strategies for performance enhancement.

METHODS: This study, conducted at Mass General Brigham in Boston, MA, analysed clinical notes from the four years prior to a 2019 diagnosis of mild cognitive impairment in patients aged 50 and older. We developed prompts for two LLMs, Llama 2 and GPT-4, on Health Insurance Portability and Accountability Act (HIPAA)-compliant cloud-computing platforms using multiple approaches (e.g., hard prompting, retrieval augmented generation, and error analysis-based instructions) to select the optimal LLM-based method. Baseline models included a hierarchical attention-based neural network and XGBoost. Subsequently, we constructed an ensemble of the three models using a majority vote approach. Confusion-matrix-based scores were used for model evaluation.
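
The majority-vote ensembling described in these METHODS reduces to a few lines of code. The sketch below assumes binary per-note-section labels from three already-trained models; the model names and predictions are hypothetical.

```python
from collections import Counter

def majority_vote(*model_labels: list) -> list:
    """Combine per-note binary predictions from several models by majority vote."""
    ensembled = []
    for votes in zip(*model_labels):
        ensembled.append(Counter(votes).most_common(1)[0][0])
    return ensembled

# Hypothetical predictions for five note sections (1 = cognitive decline present)
llm = [1, 0, 1, 1, 0]   # e.g., GPT-4 with the selected prompting strategy
han = [1, 0, 0, 1, 0]   # hierarchical attention network baseline
xgb = [0, 0, 1, 1, 1]   # XGBoost baseline
print(majority_vote(llm, han, xgb))  # -> [1, 0, 1, 1, 0]
```

With three voters and binary labels there are no ties, which is one reason an odd-sized ensemble is a convenient design choice here.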

FINDINGS: We used a randomly annotated sample of 4949 note sections from 1969 patients (women: 1046 [53.1%]; age: mean, 76.0 [SD, 13.3] years), filtered with keywords related to cognitive functions, for model development. For testing, a random annotated sample of 1996 note sections from 1161 patients (women: 619 [53.3%]; age: mean, 76.5 [SD, 10.2] years) without keyword filtering was utilised. GPT-4 demonstrated superior accuracy and efficiency compared to Llama 2, but did not outperform traditional models. The ensemble model outperformed the individual models in terms of all evaluation metrics with statistical significance (p < 0.01), achieving a precision of 90.2% [95% CI: 81.9%-96.8%], a recall of 94.2% [95% CI: 87.9%-98.7%], and an F1-score of 92.1% [95% CI: 86.8%-96.4%]. Notably, the ensemble model showed a significant improvement in precision, increasing from a range of 70%-79% to above 90%, compared to the best-performing single model. Error analysis revealed that 63 samples were incorrectly predicted by at least one model; however, only 2 cases (3.2%) were mutual errors across all models, indicating diverse error profiles among them.
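
As a quick consistency check on the reported ensemble metrics, the F1-score is simply the harmonic mean of precision and recall:

```python
def f1(precision: float, recall: float) -> float:
    """Harmonic mean of precision and recall."""
    return 2 * precision * recall / (precision + recall)

# Reported ensemble point estimates: precision 90.2%, recall 94.2%
print(round(100 * f1(0.902, 0.942), 1))  # 92.2, matching the reported 92.1%
# up to rounding of the published precision and recall values
```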

INTERPRETATION: LLMs and traditional machine learning models trained using local EHR data exhibited diverse error profiles. The ensemble of these models was found to be complementary, enhancing diagnostic performance. Future research should investigate integrating LLMs with smaller, localised models and incorporating medical data and domain knowledge to enhance performance on specific tasks.

FUNDING: This research was supported by the National Institute on Aging grants (R44AG081006, R01AG080429) and National Library of Medicine grant (R01LM014239).}, } @article {pmid39396229, year = {2024}, author = {Sivakumar, TB and Hasan Hussain, SH and Balamanigandan, R}, title = {Internet of Things and Cloud Computing-based Disease Diagnosis using Optimized Improved Generative Adversarial Network in Smart Healthcare System.}, journal = {Network (Bristol, England)}, volume = {}, number = {}, pages = {1-24}, doi = {10.1080/0954898X.2024.2392770}, pmid = {39396229}, issn = {1361-6536}, abstract = {The integration of IoT and cloud services enhances communication and quality of life, while predictive analytics powered by AI and deep learning enables proactive healthcare. Deep learning, a subset of machine learning, efficiently analyzes vast datasets, offering rapid disease prediction. Leveraging recurrent neural networks on electronic health records improves accuracy for timely intervention and preventative care. In this manuscript, Internet of Things and Cloud Computing-based Disease Diagnosis using Optimized Improved Generative Adversarial Network in Smart Healthcare System (IOT-CC-DD-OICAN-SHS) is proposed. Initially, an Internet of Things (IoT) device collects diabetes, chronic kidney disease, and heart disease data from patients via wearable devices and intelligent sensors and then saves the patient's large data in the cloud. These cloud data are pre-processed to turn them into a suitable format. The pre-processed dataset is sent into the Improved Generative Adversarial Network (IGAN), which reliably classifies the data as disease-free or diseased. Then, IGAN was optimized using the Flamingo Search optimization algorithm (FSOA). The proposed technique is implemented in Java using Cloud Sim and examined utilizing several performance metrics. The proposed method attains greater accuracy and specificity with lower execution time compared to existing methodologies, IoT-C-SHMS-HDP-DL, PPEDL-MDTC and CSO-CLSTM-DD-SHS respectively.}, } @article {pmid39386875, year = {2024}, author = {Rashmi, S and Siwach, V and Sehrawat, H and Brar, GS and Singla, J and Jhanjhi, NZ and Masud, M and Shorfuzzaman, M}, title = {AI-powered VM selection: Amplifying cloud performance with dragonfly algorithm.}, journal = {Heliyon}, volume = {10}, number = {19}, pages = {e37912}, pmid = {39386875}, issn = {2405-8440}, abstract = {The convenience and cost-effectiveness offered by cloud computing have attracted a large customer base. In a cloud environment, the inclusion of the concept of virtualization requires careful management of resource utilization and energy consumption. With a rapidly increasing consumer base of cloud data centers, it faces an overwhelming influx of Virtual Machine (VM) requests. In cloud computing technology, the mapping of these requests onto the actual cloud hardware is known as VM placement which is a significant area of research. The article presents the Dragonfly Algorithm integrated with Modified Best Fit Decreasing (DA-MBFD) is proposed to minimize the overall power consumption and the migration count. DA-MBFD uses MBFD for ranking VMs based on their resource requirement, then uses the Minimization of Migration (MM) algorithm for hotspot detection followed by DA to optimize the replacement of VMs from the overutilized hosts. DA-MBFD is compared with a few of the other existing techniques to show its efficiency. 
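
To make the DA-MBFD pipeline described above more tangible, here is a generic best-fit-decreasing placement sketch in the spirit of MBFD: VMs are sorted by demand and each is assigned to the feasible host whose power draw increases the least. The linear power model and all numbers are illustrative assumptions, not the paper's implementation.

```python
def power_increase(host, vm_cpu):
    """Assumed linear power model: power grows with CPU utilization."""
    p_idle, p_max = 70.0, 250.0  # illustrative host power bounds (watts)
    def power(util):
        return p_idle + (p_max - p_idle) * util
    util_now = host['used'] / host['capacity']
    util_new = (host['used'] + vm_cpu) / host['capacity']
    return power(util_new) - power(util_now)

def mbfd_place(vms, hosts):
    """Best-fit decreasing: sort VMs by demand, pick the host with the
    smallest power increase among those with enough remaining capacity."""
    placement = {}
    for vm_id, cpu in sorted(vms.items(), key=lambda kv: kv[1], reverse=True):
        feasible = [h for h in hosts if h['used'] + cpu <= h['capacity']]
        if not feasible:
            raise RuntimeError(f"no host can fit {vm_id}")
        best = min(feasible, key=lambda h: power_increase(h, cpu))
        best['used'] += cpu
        placement[vm_id] = best['name']
    return placement

hosts = [{'name': 'h1', 'capacity': 100, 'used': 0},
         {'name': 'h2', 'capacity': 100, 'used': 0}]
print(mbfd_place({'vm1': 60, 'vm2': 50, 'vm3': 30}, hosts))
```
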
The comparative analysis of DA-MBFD against E-ABC, E-MBFD, and MBFD-MM shows significant improvements: reductions in power consumption of 8.21%, 8.6%, and 6.77%; in service level agreement violations of 9.25%, 6.98%, and 7.86%; and in the number of migrations of 6.65%, 8.92%, and 7.02%, respectively.}, } @article {pmid39381918, year = {2024}, author = {Vhatkar, K and Kathole, AB and Lonare, S and Katti, J and Kimbahune, VV}, title = {Designing an optimal task scheduling and VM placement in the cloud environment with multi-objective constraints using Hybrid Lemurs and Gannet Optimization Algorithm.}, journal = {Network (Bristol, England)}, volume = {}, number = {}, pages = {1-31}, doi = {10.1080/0954898X.2024.2412678}, pmid = {39381918}, issn = {1361-6536}, abstract = {An efficient resource utilization method can greatly reduce expenses and wasted resources. Typical cloud resource planning approaches lack support for the emerging paradigm regarding asset management speed and optimization. The use of cloud computing relies heavily on task planning and allocation of resources. The task scheduling issue is more crucial in arranging and allotting application jobs supplied by customers on Virtual Machines (VM) in a specific manner. The task planning issue needs to be specifically stated to increase scheduling efficiency. The task scheduling in the cloud environment model is developed using optimization techniques. This model intends to optimize both the task scheduling and VM placement over the cloud environment. In this model, a new hybrid meta-heuristic optimization algorithm is developed, named the Hybrid Lemurs-based Gannet Optimization Algorithm (HL-GOA). The multi-objective function is considered with constraints like cost, time, resource utilization, makespan, and throughput. The proposed model is further validated and compared against existing methodologies. The total time required for scheduling and VM placement is reduced by 30.23%, 6.25%, 11.76%, and 10.44% relative to ESO, RSO, LO, and GOA with 2 VMs. The simulation outcomes revealed that the developed model effectively resolved the scheduling and VM placement issues.}, } @article {pmid39376084, year = {2024}, author = {Lei, M and Matukumalli, LK and Arora, K and Weber, N and Malashock, R and Mao, F and Gregurick, S and Lorsch, J}, title = {NIGMS Sandbox: a learning platform toward democratizing cloud computing for biomedical research.}, journal = {Briefings in bioinformatics}, volume = {25}, number = {Supplement_1}, pages = {}, pmid = {39376084}, issn = {1477-4054}, support = {//NIH/ ; }, mesh = {*Biomedical Research ; *Cloud Computing ; Humans ; Big Data ; Computational Biology/methods/education ; Software ; United States ; }, abstract = {Biomedical data are growing exponentially in both volume and levels of complexity, due to the rapid advancement of technologies and research methodologies. Analyzing these large datasets, referred to collectively as "big data," has become an integral component of research that guides experimentation-driven discovery and a new engine of discovery itself as it uncovers previously unknown connections through mining of existing data. To fully realize the potential of big data, biomedical researchers need access to high-performance-computing (HPC) resources. However, supporting on-premises infrastructure that keeps up with these consistently expanding research needs presents persistent financial and staffing challenges, even for well-resourced institutions.
For other institutions, such as primarily undergraduate institutions and minority-serving institutions that educate a large portion of the future workforce in the USA, this challenge presents an insurmountable barrier. Therefore, new approaches are needed to provide broad and equitable access to HPC resources to biomedical researchers and students who will advance biomedical research in the future.}, } @article {pmid39364775, year = {2024}, author = {Balasubramaniam, NK and Penberthy, S and Fenyo, D and Viessmann, N and Russmann, C and Borchers, CH}, title = {Digitalomics - digital transformation leading to omics insights.}, journal = {Expert review of proteomics}, volume = {21}, number = {9-10}, pages = {337-344}, doi = {10.1080/14789450.2024.2413107}, pmid = {39364775}, issn = {1744-8387}, mesh = {Humans ; *Precision Medicine/methods ; Biomarkers/metabolism ; Genomics/methods ; Artificial Intelligence ; Proteomics/methods ; Neoplasms/genetics/metabolism ; }, abstract = {INTRODUCTION: Biomarker discovery is increasingly moving from single omics to multiomics, as well as from multi-cell omics to single-cell omics. These transitions have increasingly adopted digital transformation technologies to accelerate the progression from data to insight. Here, we will discuss the concept of 'digitalomics' and how digital transformation directly impacts biomarker discovery. This will ultimately assist clinicians in personalized therapy and precision-medicine treatment decisions.

AREAS COVERED: Genotype-to-phenotype-based insight generation involves integrating large amounts of complex multiomic data. This data integration and analysis is aided through digital transformation, leading to better clinical outcomes. We also highlight the challenges and opportunities of Digitalomics, and provide examples of the application of Artificial Intelligence, cloud- and high-performance computing, and use of tensors for multiomic analysis workflows.
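
The "use of tensors for multiomic analysis workflows" mentioned above can be pictured with a small sketch: stacking omic layers into a three-way array and unfolding it for a joint low-rank embedding of the samples. The shapes and data below are synthetic assumptions, purely for illustration.

```python
import numpy as np

rng = np.random.default_rng(0)
# Hypothetical multiomic tensor: 50 samples x 200 features x 3 omic layers
X = rng.normal(size=(50, 200, 3))

# Mode-1 unfolding places the omic layers side by side for each sample
X_unfold = X.reshape(50, -1)  # shape (50, 600)

# Truncated SVD of the centered unfolding gives a joint sample embedding
U, S, Vt = np.linalg.svd(X_unfold - X_unfold.mean(axis=0), full_matrices=False)
k = 5
sample_embedding = U[:, :k] * S[:k]  # (50, 5) scores spanning all omic layers
```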

EXPERT OPINION: Biomarker discovery, aided by digital transformation, is having a significant impact on cancer, cardiovascular, infectious, immunological, and neurological diseases, among others. Data insights garnered from multiomic analyses, combined with patient metadata, aid patient stratification and targeted treatment across a broad spectrum of diseases. Digital transformation offers time and cost savings while leading to improved patient healthcare. Here, we highlight the impact of digital transformation on multiomics-based biomarker discovery with specific applications related to oncology.}, } @article {pmid39362158, year = {2024}, author = {Crivellaro, M and Serrao, L and Bertoldi, W and Bizzi, S and Vitti, A and Hauer, C and Skrame, K and Cekrezi, B and Zolezzi, G}, title = {Multiscale morphological trajectories to support management of free-flowing rivers: the Vjosa in South-East Europe.}, journal = {Journal of environmental management}, volume = {370}, number = {}, pages = {122541}, doi = {10.1016/j.jenvman.2024.122541}, pmid = {39362158}, issn = {1095-8630}, mesh = {*Rivers ; Ecosystem ; Europe ; Conservation of Natural Resources ; Environmental Monitoring ; Europe, Eastern ; }, abstract = {Free-flowing rivers (FFRs) are fundamental references for river management, providing the opportunity to investigate river functioning under minimal anthropic disturbance. However, large free-flowing rivers are rare in Europe and worldwide, and knowledge of their dynamics is often scarce due to a lack of data and baseline studies. So far, their characterization is mainly grounded in the longitudinal connectivity assessment, with scarce integration of further hydro-morphological aspects, particularly concerning the processes and drivers of changes in their morphology over time scales of management relevance. This work aims to broaden the characterization of FFRs by reconstructing their catchment-scale morphological evolutionary trajectories and understanding their driving causes, to better support their management. This is achieved by integrating freely available global data, including Landsat imagery and climatic reanalysis, with the limited locally available quantitative and qualitative information. The analysis of possible drivers of change at the catchment and reach scale assesses hydrological variability, flow regulation, land use change, sediment mining and bank protection works. We applied this approach to the Vjosa River (Albania), a model ecosystem of European significance and one of the few FFRs in Europe. The Vjosa was recently declared a Wild River National Park. We investigated its catchment-scale morphological changes over 50 years, considering four reaches of the Vjosa and four reaches of its main tributaries. Satellite imagery was analyzed taking advantage of the Google Earth Engine cloud computing platform. The analysis reveals a catchment-scale response to climatic fluctuations, especially in the most natural reaches, with a significant narrowing of the active river corridor, following a flood-intense period in the early 1960s. The narrowing rate gradually decreased, from 35% before 1985 to 24% between 1985 and 2000, reaching a new equilibrium from 2000 to 2020. However, the recent trajectories of the lowland reaches have been impacted by human pressures, particularly sediment mining, which intensified after the 1990s, suggesting that these reaches may instead be far from equilibrium and adjusting to such a persistent stressor.
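
The Landsat analysis on Google Earth Engine described in the Vjosa study could start from a query like the following sketch, written against the earthengine-api Python client. The region, date window, cloud filter, and NDWI threshold are placeholders, not the study's actual parameters.

```python
import ee

ee.Initialize()

# Illustrative area of interest near the Vjosa corridor (coordinates are placeholders)
aoi = ee.Geometry.Rectangle([19.8, 39.9, 20.6, 40.6])

# Landsat 5 surface reflectance, filtered to an early epoch and low cloud cover
col = (ee.ImageCollection('LANDSAT/LT05/C02/T1_L2')
       .filterBounds(aoi)
       .filterDate('1985-01-01', '1990-12-31')
       .filter(ee.Filter.lt('CLOUD_COVER', 20)))

def water_area(img):
    # NDWI (green vs. NIR) highlights open water and wet, unvegetated channel
    ndwi = img.normalizedDifference(['SR_B2', 'SR_B4']).rename('ndwi')
    water = ndwi.gt(0)
    area = water.multiply(ee.Image.pixelArea()).reduceRegion(
        reducer=ee.Reducer.sum(), geometry=aoi, scale=30, maxPixels=1e9)
    return img.set('water_m2', area.get('ndwi'))

# Per-scene wetted area, a crude proxy for active-corridor width over time
print(col.map(water_area).aggregate_array('water_m2').getInfo())
```
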
Identifying the key drivers of change and building catchment-scale knowledge of geomorphic change can inform the management of riverine protected areas, and the proposed integrated approach is a promising tool to help overcome the data scarcity typical of the limited remaining large FFRs.}, } @article {pmid39348369, year = {2024}, author = {Alwageed, HS and Keshta, I and Khan, RA and Alzahrani, A and Tariq, MU and Ghani, A}, title = {An empirical study for mitigating sustainable cloud computing challenges using ISM-ANN.}, journal = {PloS one}, volume = {19}, number = {9}, pages = {e0308971}, doi = {10.1371/journal.pone.0308971}, pmid = {39348369}, issn = {1932-6203}, mesh = {*Cloud Computing ; *Computer Security ; *Neural Networks, Computer ; Humans ; Surveys and Questionnaires ; Sustainable Development ; }, abstract = {The significance of cloud computing methods in everyday life is growing as a result of the exponential advancement and refinement of artificial technology. As cloud computing makes more progress, it will bring with it new opportunities and threats that affect the long-term health of society and the environment. Many questions remain unanswered regarding sustainability, such as, "How will widely available computing systems affect environmental equilibrium"? When hundreds of millions of microcomputers are invisible to each other, what will society look like? What does this mean for social sustainability? This paper empirically investigates the ethical challenges and practices of cloud computing about sustainable development. We conducted a systematic literature review followed by a questionnaire survey and identified 11 sustainable cloud computing challenges (SCCCs) and 66 practices for addressing the identified challenges. Interpretive structural modeling (ISM) and Artificial Neural Networks (ANN) were then used to identify and analyze the interrelationship between the SCCCs. Then, based on the results of the ISM, 11 process areas were determined to develop the proposed sustainable cloud computing challenges mitigation model (SCCCMM). The SCCCMM includes four main categories: Requirements specification, Quality of Service (QoS) and Service Legal Agreement (SLA), Complexity and Cyber security, and Trust. The model was subsequently tested with a real-world case study that was connected to the environment. In a sustainable cloud computing organization, the results demonstrate that the proposed SCCCMM aids in estimating the level of mitigation. The participants in the case study also appreciated the suggested SCCCMM for its practicality, user-friendliness, and overall usefulness. When it comes to the sustainability of their software products, we believe that organizations involved in cloud computing can benefit from the suggested SCCCMM. 
Additionally, researchers and industry practitioners can expect the proposed model to provide a strong foundation for developing new sustainable methods and tools for cloud computing.}, } @article {pmid39345166, year = {2024}, author = {Nakamura, T and Nomura, T and Endo, M and Sakaguchi, A and Ruofan, H and Kashiwazaki, T and Umeki, T and Takase, K and Asavanant, W and Yoshikawa, JI and Furusawa, A}, title = {Long-term stability of squeezed light in a fiber-based system using automated alignment.}, journal = {The Review of scientific instruments}, volume = {95}, number = {9}, pages = {}, doi = {10.1063/5.0203988}, pmid = {39345166}, issn = {1089-7623}, abstract = {Providing a cloud service for optical quantum computing requires stabilizing the optical system for extended periods. It is advantageous to construct a fiber-based system, which does not require spatial alignment. However, fiber-based systems are instead subject to fiber-specific instabilities. For instance, there are phase drifts due to ambient temperature changes and external disturbances and polarization fluctuations due to the finite polarization extinction ratio of fiber components. Here, we report the success of measuring squeezed light with a fiber system for 24 h. To do this, we introduce stabilization mechanics to suppress fluctuations in the fiber system and an integrated controller to automatically align the entire system. The squeezed light at a wavelength of 1545.3 nm is measured every 2 min, where automated alignments are inserted every 30 min. The squeezing levels with an average of -4.42 dB are recorded with an extremely small standard deviation of 0.08 dB over 24 h. With the technologies developed here, we can build complicated optical setups with the fiber-based system and operate them automatically for extended periods, which is promising for cloud service of quantum computation.}, } @article {pmid39338834, year = {2024}, author = {López-Baldominos, I and Pospelova, V and Fernández-Sanz, L and Castillo-Martínez, A}, title = {Modeling and Analyzing the Availability of Technical Professional Profiles for the Success of Smart Cities Projects in Europe.}, journal = {Sensors (Basel, Switzerland)}, volume = {24}, number = {18}, pages = {}, pmid = {39338834}, issn = {1424-8220}, support = {101052513//European Commission/ ; }, abstract = {The success of developing and implementing Smart Cities (SC) projects depends on a varied set of factors, where the availability of a qualified technical workforce is a critical one. The combination of ICT requirements, like the effectiveness and quality of solutions merging IoT, cloud computing, sensors, and communications with the work from many varied disciplines (e.g., civil engineering, architecture, etc.), mixed with aspects of environmental and business sustainability, makes the management of these projects really challenging. Reports forecast a scarcity of qualified candidates, given this complexity and the growth of activity in SC projects. The European project SMACITE has addressed the requirements of the qualification of an ICT workforce with an analysis of multiples sources of information from the labor market, feedback from involved stakeholders, and the literature. The goal was the development of two occupational ICT profiles as a reference for training and for the availability of candidates for job vacancies. The result is two ICT role profiles for engineers and technicians, mapped with the European skills frameworks ESCO and EN16234. 
The profiles capture the whole set of requirements, spanning not only the core technical areas and soft skills but also additional technical areas, sustainability competences, and managerial skills, as determined from the analysis of the different sources of information. Our work has also determined which existing ESCO occupations are similar to the two reference profiles, so they are better adapted to SC projects. The training activities of SMACITE have also suggested the amount of training expected for a varied sample of candidates who want to be qualified for SC projects.}, } @article {pmid39338808, year = {2024}, author = {Kopras, B and Idzikowski, F and Bogucka, H}, title = {A Survey on Reduction of Energy Consumption in Fog Networks-Communications and Computations.}, journal = {Sensors (Basel, Switzerland)}, volume = {24}, number = {18}, pages = {}, pmid = {39338808}, issn = {1424-8220}, support = {CYBERSECIDENT/487845/IV/NCBR/2021//National Centre for Research and Development/ ; 2023/05/Y/ST7/00002//National Science Center/ ; }, abstract = {Fog networking has become an established architecture addressing various applications with strict latency, jitter, and bandwidth constraints. Fog Nodes (FNs) allow for flexible and effective computation offloading and content distribution. However, the transmission of computational tasks, the processing of these tasks, and finally sending the results back still incur energy costs. We survey the literature on fog computing, focusing on energy consumption. We take a holistic approach and look at energy consumed by devices located in all network tiers from the things tier through the fog tier to the cloud tier, including communication links between the tiers. Furthermore, fog network modeling is analyzed with particular emphasis on application scenarios and the energy consumed for communication and computation. We perform a detailed analysis of model parameterization, which is crucial for the results presented in the surveyed works. Finally, we survey energy-saving methods, putting them into different classification systems and considering the results presented in the surveyed works. Based on our analysis, we present a classification and comparison of the fog algorithmic models, where energy is spent on communication and computation, and where delay is incurred. We also classify the scenarios examined by the surveyed works with respect to the assumed parameters. Moreover, we systematize methods used to save energy in a fog network. These methods are compared with respect to their scenarios, objectives, constraints, and decision variables. Finally, we discuss future trends in fog networking and how related technologies and economics shall trade their increasing development with energy consumption.}, } @article {pmid39338747, year = {2024}, author = {Hyun, G and Oak, J and Kim, D and Kim, K}, title = {The Impact of an Automation System Built with Jenkins on the Efficiency of Container-Based System Deployment.}, journal = {Sensors (Basel, Switzerland)}, volume = {24}, number = {18}, pages = {}, pmid = {39338747}, issn = {1424-8220}, support = {NRF-2021R1A2C2013933//Ministry of Science and ICT/ ; S3224694//Ministry of SMEs and Startups/ ; }, abstract = {This paper evaluated deployment efficiency by comparing manual deployment with automated deployment through a CI/CD pipeline using Jenkins. This study involved moving from a manual deployment process to an automated system using Jenkins and experimenting with both deployment methods in a real-world environment.
The results showed that, compared with manual deployment, the automated system significantly reduced both the deployment time and the error rate. Manual deployment required human intervention at each step, making it time-consuming and prone to mistakes, while automated deployment using Jenkins automated each step to ensure consistency and maximized time efficiency through parallel processing. Automated testing verified the stability of the code before deployment, minimizing errors. This study demonstrates the effectiveness of adopting a CI/CD pipeline and shows that automated systems can provide high efficiency in real-world production environments. It also highlights the importance of security measures to prevent sensitive information leakage during CI/CD, suggesting the use of secret management tools and environment variables and limiting access rights. This research will contribute to exploring the applicability of CI/CD pipelines in different environments and, in doing so, validate the universality of automated systems.}, } @article {pmid39338710, year = {2024}, author = {Marković, D and Stamenković, Z and Đorđević, B and Ranđić, S}, title = {Image Processing for Smart Agriculture Applications Using Cloud-Fog Computing.}, journal = {Sensors (Basel, Switzerland)}, volume = {24}, number = {18}, pages = {}, pmid = {39338710}, issn = {1424-8220}, support = {16DHBKIO20//Brandenburg/Bayern Initiative for Integration of Artificial Intelligence - Hardware Subjects in University Curriculum (BB-KI-Chips)/ ; }, mesh = {*Agriculture/methods ; *Image Processing, Computer-Assisted/methods ; *Cloud Computing ; *Neural Networks, Computer ; Crops, Agricultural ; Algorithms ; Humans ; Deep Learning ; }, abstract = {The widespread use of IoT devices has led to the generation of a huge amount of data and driven the need for analytical solutions in many areas of human activities, such as the field of smart agriculture. Continuous monitoring of crop growth stages enables timely interventions, such as control of weeds and plant diseases, as well as pest control, ensuring optimal development. Decision-making systems in smart agriculture involve image analysis with the potential to increase productivity, efficiency and sustainability. By applying Convolutional Neural Networks (CNNs), state recognition and classification can be performed based on images from specific locations. Thus, we have developed a solution for early problem detection and resource management optimization. The main concept of the proposed solution relies on a direct connection between Cloud and Edge devices, which is achieved through Fog computing. The goal of our work is the creation of a deep learning model for image classification that can be optimized and adapted for implementation on devices with limited hardware resources at the level of Fog computing. This could increase the importance of image processing in the reduction of agricultural operating costs and manual labor. As a result of offloading data processing to Edge and Fog devices, the system responsiveness can be improved, the costs associated with data transmission and storage can be reduced, and the overall system reliability and security can be increased. The proposed solution can choose classification algorithms to find a trade-off between size and accuracy of the model optimized for devices with limited hardware resources.
After testing our model for tomato disease classification compiled for execution on FPGA, it was found that the decrease in test accuracy is as small as 0.83% (from 96.29% to 95.46%).}, } @article {pmid39338693, year = {2024}, author = {Nur, A and Muanenda, Y}, title = {Design and Evaluation of Real-Time Data Storage and Signal Processing in a Long-Range Distributed Acoustic Sensing (DAS) Using Cloud-Based Services.}, journal = {Sensors (Basel, Switzerland)}, volume = {24}, number = {18}, pages = {}, pmid = {39338693}, issn = {1424-8220}, abstract = {In cloud-based Distributed Acoustic Sensing (DAS) sensor data management, we are confronted with two primary challenges. First, the development of efficient storage mechanisms capable of handling the enormous volume of data generated by these sensors poses a challenge. To solve this issue, we propose a method to address the issue of handling the large amount of data involved in DAS by designing and implementing a pipeline system to efficiently send the big data to DynamoDB in order to fully use the low latency of the DynamoDB data storage system for a benchmark DAS scheme for performing continuous monitoring over a 100 km range at a meter-scale spatial resolution. We employ the DynamoDB functionality of Amazon Web Services (AWS), which allows highly expandable storage capacity with latency of access of a few tens of milliseconds. The different stages of DAS data handling are performed in a pipeline, and the scheme is optimized for high overall throughput with reduced latency suitable for concurrent, real-time event extraction as well as the minimal storage of raw and intermediate data. In addition, the scalability of the DynamoDB-based data storage scheme is evaluated for linear and nonlinear variations of number of batches of access and a wide range of data sample sizes corresponding to sensing ranges of 1-110 km. The results show latencies of 40 ms per batch of access with low standard deviations of a few milliseconds, and latency per sample decreases for increasing the sample size, paving the way toward the development of scalable, cloud-based data storage services integrating additional post-processing for more precise feature extraction. The technique greatly simplifies DAS data handling in key application areas requiring continuous, large-scale measurement schemes. In addition, the processing of raw traces in a long-distance DAS for real-time monitoring requires the careful design of computational resources to guarantee requisite dynamic performance. Now, we will focus on the design of a system for the performance evaluation of cloud computing systems for diverse computations on DAS data. This system is aimed at unveiling valuable insights into performance metrics and operational efficiencies of computations on the data in the cloud, which will provide a deeper understanding of the system's performance, identify potential bottlenecks, and suggest areas for improvement. To achieve this, we employ the CloudSim framework. The analysis reveals that the virtual machine (VM) performance decreases significantly the processing time with more capable VMs, influenced by Processing Elements (PEs) and Million Instructions Per Second (MIPS). The results also reflect that, although a larger number of computations is required as the fiber length increases, with the subsequent increase in processing time, the overall speed of computation is still suitable for continuous real-time monitoring. 
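
Returning to the DynamoDB ingestion stage of the DAS pipeline described above: batched writes with the boto3 AWS SDK look roughly like the sketch below. The table name, key schema, and item layout are assumptions for illustration, not the pipeline's actual design.

```python
import time
import boto3

# Illustrative table; the actual pipeline's table and keys will differ
dynamodb = boto3.resource('dynamodb', region_name='us-east-1')
table = dynamodb.Table('das_traces')

def store_batch(channel_samples, trace_id):
    """Write one batch of DAS samples; batch_writer buffers and retries for us."""
    start = time.monotonic()
    with table.batch_writer() as batch:
        for position_m, amplitude in channel_samples:
            batch.put_item(Item={
                'trace_id': trace_id,        # partition key (assumed)
                'position_m': position_m,    # sort key (assumed), integer meters
                'amplitude': str(amplitude), # stored as string to sidestep Decimal
            })
    return time.monotonic() - start  # per-batch latency of the kind reported

# Usage: one synthetic batch covering three fiber positions
print(store_batch([(0, 0.12), (1, 0.15), (2, 0.11)], 'trace-0001'))
```
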
We also see that VMs with lower performance in terms of processing speed and number of CPUs have more inconsistent processing times than those with higher performance, while not incurring significantly higher prices. Additionally, the impact of VM parameters on computation time is explored, highlighting the importance of resource optimization in the DAS system design for efficient performance. The study also observes a notable trend in processing time, showing a significant decrease for every additional 50,000 columns processed as the length of the fiber increases. This finding underscores the efficiency gains achieved with larger computational loads, indicating improved system performance and capacity utilization as the DAS system processes more extensive datasets.}, } @article {pmid39320977, year = {2024}, author = {Senthilkumar, G and Anandamurugan, S}, title = {Energy and time-aware scheduling in diverse virtualized cloud computing environments using optimized self-attention progressive generative adversarial network.}, journal = {Network (Bristol, England)}, volume = {}, number = {}, pages = {1-20}, doi = {10.1080/0954898X.2024.2391401}, pmid = {39320977}, issn = {1361-6536}, abstract = {The rapid growth of cloud computing has led to the widespread adoption of heterogeneous virtualized environments, offering scalable and flexible resources to meet diverse user demands. However, the increasing complexity and variability in workload characteristics pose significant challenges in optimizing energy consumption. Many scheduling algorithms have been suggested to address this. Therefore, a self-attention-based progressive generative adversarial network optimized with the Dwarf Mongoose algorithm and adopted for Energy and Deadline Aware Scheduling in heterogeneous virtualized cloud computing (SAPGAN-DMA-DAS-HVCC) is proposed in this paper. Here, a self-attention based progressive generative adversarial network (SAPGAN) is proposed to schedule activities in a cloud environment with an objective function of makespan and energy consumption. The Dwarf Mongoose algorithm is then used to optimize the weight parameters of SAPGAN. The proposed SAPGAN-DMA-DAS-HVCC approach achieves 32.77%, 34.83% and 35.76% improvement in makespan and 31.52%, 33.28% and 29.14% lower cost when compared with existing models, namely task scheduling in a heterogeneous cloud environment utilizing the mean grey wolf optimization approach, the energy- and performance-efficient task scheduling algorithm for heterogeneous virtualized clouds, and energy- and makespan-aware scheduling of deadline-sensitive tasks in the cloud environment, respectively.}, } @article {pmid39315479, year = {2024}, author = {Ghani, A and Heinrich, H and Brown, T and Schellhorn, K}, title = {Enhancing EEG data quality and precision for cloud-based clinical applications: an evaluation of the SLOG framework.}, journal = {Biomedical physics & engineering express}, volume = {10}, number = {6}, pages = {}, doi = {10.1088/2057-1976/ad7e2d}, pmid = {39315479}, issn = {2057-1976}, mesh = {*Electroencephalography/methods ; Humans ; *Cloud Computing ; *Signal Processing, Computer-Assisted ; *Artifacts ; *Brain/diagnostic imaging/physiology ; *Algorithms ; Signal-To-Noise Ratio ; Eye Movements/physiology ; Electrooculography/methods ; Data Accuracy ; }, abstract = {Automation is revamping our preprocessing pipelines and accelerating the delivery of personalized digital medicine.
It improves efficiency, reduces costs, and allows clinicians to treat patients without significant delays. However, the influx of multimodal data highlights the need to protect sensitive information, such as clinical data, and to safeguard data fidelity. One of the neuroimaging modalities that produces large amounts of time-series data is electroencephalography (EEG). It captures the neural dynamics in a task or resting brain state with high temporal resolution. EEG electrodes placed on the scalp acquire electrical activity from the brain. These electrical potentials attenuate as they cross multiple layers of brain tissue and fluid, yielding signals that are weak relative to noise, i.e., a low signal-to-noise ratio. EEG signals are further distorted by internal physiological artifacts, such as eye movements (EOG) or the heartbeat (ECG), and by external noise, such as line noise (50 Hz). EOG artifacts, owing to their proximity to the frontal brain regions, are particularly challenging to eliminate. Therefore, a widely used EOG rejection method, independent component analysis (ICA), demands manual inspection of the marked EOG components before they are rejected from the EEG data. We underscore the inaccuracy of automated ICA rejection and provide an auxiliary algorithm, Second Layer Inspection for EOG (SLOG), for the clinical environment. SLOG, based on spatial and temporal patterns of eye movements, re-examines the already marked EOG artifacts and confirms that no EEG-related activity is mistakenly eliminated in this artifact rejection step. SLOG achieved a 99% precision rate on the simulated dataset and 85% precision on the real EEG dataset. One of the primary considerations for cloud-based applications is operational cost, including computing power. Algorithms like SLOG allow us to maintain data fidelity and precision without overloading cloud platforms and maxing out budgets.}, } @article {pmid39314732, year = {2024}, author = {Khan, S and Jiangbin, Z and Ullah, F and Pervez Akhter, M and Khan, S and Awwad, FA and Ismail, EAA}, title = {Hybrid computing framework security in dynamic offloading for IoT-enabled smart home system.}, journal = {PeerJ. Computer science}, volume = {10}, number = {}, pages = {e2211}, pmid = {39314732}, issn = {2376-5992}, abstract = {In the distributed computing era, cloud computing has completely changed organizational operations by facilitating simple access to resources. However, the rapid development of the IoT has led to collaborative computing, which raises scalability and security challenges. To fully realize the potential of the Internet of Things (IoT) in smart home technologies, there is still a need for strong data security solutions, which are essential in dynamic offloading in conjunction with edge, fog, and cloud computing. This research on smart home challenges covers in-depth examinations of data security, privacy, processing speed, storage capacity restrictions, and analytics inside networked IoT devices. We introduce the Trusted IoT Big Data Analytics (TIBDA) framework as a comprehensive solution to reshape smart living. Our primary focus is mitigating pervasive data security and privacy issues. TIBDA incorporates robust trust mechanisms, prioritizing data privacy and reliability for secure processing and user information confidentiality within the smart home environment.
We achieve this by employing a hybrid cryptosystem that combines Elliptic Curve Cryptography (ECC), Post Quantum Cryptography (PQC), and Blockchain technology (BCT) to protect user privacy and confidentiality. Additionally, we comprehensively compared four prominent Artificial Intelligence anomaly detection algorithms (Isolation Forest, Local Outlier Factor, One-Class SVM, and Elliptic Envelope). We utilized machine learning classification algorithms (random forest, k-nearest neighbors, support vector machines, linear discriminant analysis, and quadratic discriminant analysis) for detecting malicious and non-malicious activities in smart home systems. Furthermore, as the main part of the research, the TIBDA framework uses a dynamic artificial neural network (ANN) algorithm to design a hybrid computing system that integrates edge, fog, and cloud architecture and efficiently supports numerous users while processing data from IoT devices in real time. The analysis shows that TIBDA outperforms comparable systems significantly across various metrics. In terms of response time, TIBDA demonstrated a reduction of 10-20% compared to the other systems under varying user loads, device counts, and transaction volumes. Regarding security, TIBDA's AUC values were consistently higher by 5-15%, indicating superior protection against threats. Additionally, TIBDA exhibited the highest trustworthiness with an uptime percentage 10-12% greater than its competitors. TIBDA's Isolation Forest algorithm achieved an accuracy of 99.30%, and the random forest algorithm achieved an accuracy of 94.70%, outperforming other methods by 8-11%. Furthermore, our ANN-based offloading decision-making model achieved a validation accuracy of 99% and reduced loss to 0.11, demonstrating significant improvements in resource utilization and system performance.}, } @article {pmid39312829, year = {2024}, author = {Lai, L and Liu, Y and Zhang, Y and Cao, Z and Yin, Y and Chen, X and Jin, J and Wu, S}, title = {Long-term spatiotemporal mapping in lacustrine environment by remote sensing: Review with case study, challenges, and future directions.}, journal = {Water research}, volume = {267}, number = {}, pages = {122457}, doi = {10.1016/j.watres.2024.122457}, pmid = {39312829}, issn = {1879-2448}, mesh = {*Remote Sensing Technology ; *Environmental Monitoring/methods ; *Lakes ; }, abstract = {Satellite remote sensing, unlike traditional ship-based sampling, possesses the advantage of revisit capabilities and provides over 40 years of data support for observing lake environments at local, regional, and global scales. In recent years, global freshwater and coastal waters have faced adverse environmental issues, including harmful phytoplankton blooms, eutrophication, and extreme temperatures. To comprehensively address the goal of 'reviewing the past, assessing the present, and predicting the future', research increasingly focuses on developing and producing algorithms and products for long-term and large-scale mapping. This paper provides a comprehensive review of related research, evaluating the current status, shortcomings, and future trends of remote sensing datasets, monitoring targets, technical methods, and data processing platforms.
The analysis demonstrates that the transition to long-term, spatiotemporally dynamic lake monitoring is thriving: (i) evolving from single data sources to collaborative satellite observations to balance temporal and spatial resolutions, (ii) shifting from single research targets to diversified and multidimensional objectives, (iii) progressing from empirical/mechanism models to machine/deep/transfer learning algorithms, (iv) moving from local processing to cloud-based platforms and parallel computing. Future directions include, but are not limited to: (i) establishing a global sampling data-sharing platform, (ii) developing precise atmospheric correction algorithms, (iii) building next-generation ocean color sensors and virtual constellation networks, (iv) introducing Interpretable Machine Learning (IML) and Explainable Artificial Intelligence (XAI) models, (v) integrating cloud computing, big data/model/computer, and Internet of Things (IoT) technologies, (vi) crossing disciplines with earth sciences, hydrology, computer science, and human geography, etc. In summary, this work offers valuable references and insights for academic research and government decision-making, which are crucial for enhancing the long-term tracking of aquatic ecological environment and achieving the Sustainable Development Goals (SDGs).}, } @article {pmid39312513, year = {2024}, author = {Shahzad, A and Chen, W and Shaheen, M and Zhang, Y and Ahmad, F}, title = {A robust algorithm for authenticated health data access via blockchain and cloud computing.}, journal = {PloS one}, volume = {19}, number = {9}, pages = {e0307039}, pmid = {39312513}, issn = {1932-6203}, mesh = {*Cloud Computing ; *Algorithms ; *Blockchain ; *Computer Security ; *Electronic Health Records ; Humans ; Confidentiality ; }, abstract = {In modern healthcare, providers increasingly use cloud services to store and share electronic medical records. However, traditional cloud hosting, which depends on intermediaries, poses risks to privacy and security, including inadequate control over access, data auditing, and tracking data origins. Additionally, current schemes face significant limitations such as scalability concerns, high computational overhead, practical implementation challenges, and issues with interoperability and data standardization. Unauthorized data access by cloud providers further exacerbates these concerns. Blockchain technology, known for its secure and decentralized nature, offers a solution by enabling secure data auditing in sharing systems. This research integrates blockchain into healthcare for efficient record management. We propose a blockchain-based method for secure EHR management and integrate Ciphertext-Policy Attribute-Based Encryption (CP-ABE) for fine-grained access control. The proposed algorithm combines blockchain and smart contracts with a cloud-based healthcare Service Management System (SMS) to ensure secure and accessible EHRs. Smart contracts automate key management, encryption, and decryption processes, enhancing data security and integrity. The blockchain ledger authenticates data transactions, while the cloud provides scalability. The SMS manages access requests, enhancing resource allocation and response times. A dual authentication system confirms patient keys before granting data access, with failed attempts leading to access revocation and incident logging. Our analyses show that this algorithm significantly improves the security and efficiency of health data exchanges.
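The ledger role described here (authenticating data transactions) can be demystified with a toy hash chain: each access event embeds the hash of the previous block, so tampering with any logged event breaks verification. This illustrates the general idea only, not the paper's smart-contract implementation; all field names are invented.

```python
# Sketch: a hash-chained audit log for EHR access events. Tampering with
# any block invalidates every later prev_hash, which is what makes the
# ledger auditable. Toy illustration; not the paper's implementation.
import hashlib
import json
import time

def hash_block(block: dict) -> str:
    return hashlib.sha256(json.dumps(block, sort_keys=True).encode()).hexdigest()

def append_event(chain: list, event: dict) -> None:
    chain.append({
        "index": len(chain),
        "timestamp": time.time(),
        "event": event,  # e.g., who accessed which record, and how
        "prev_hash": hash_block(chain[-1]) if chain else "0" * 64,
    })

def verify(chain: list) -> bool:
    return all(chain[i]["prev_hash"] == hash_block(chain[i - 1])
               for i in range(1, len(chain)))

ledger: list = []
append_event(ledger, {"actor": "dr_lee", "record": "patient_42", "op": "read"})
append_event(ledger, {"actor": "cloud_sms", "record": "patient_42", "op": "key_issue"})
assert verify(ledger)  # flipping any logged field makes this False
```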
By combining blockchain's decentralized structure with the cloud's scalability, this approach significantly improves EHR security protocols in modern healthcare settings.}, } @article {pmid39311476, year = {2025}, author = {Watson, A and Wozniak-O'Connor, V}, title = {The promise of artificial intelligence in health: Portrayals of emerging healthcare technologies.}, journal = {Sociology of health & illness}, volume = {47}, number = {1}, pages = {e13840}, pmid = {39311476}, issn = {1467-9566}, support = {CE200100005//Australian Research Council/ ; }, mesh = {*Artificial Intelligence ; Humans ; Australia ; *Biomedical Technology ; *Delivery of Health Care ; Robotics ; Mass Media ; }, abstract = {Emerging technologies of artificial intelligence (AI) and automated decision-making (ADM) promise to advance many industries. Healthcare is a key locus for new developments, where operational improvements are magnified by the bigger-picture promise of improved care and outcomes for patients. Forming the zeitgeist of contemporary sociotechnical innovation in healthcare, media portrayals of these technologies can shape how they are implemented, experienced and understood across healthcare systems. This article identifies current applications of AI and ADM within Australian healthcare contexts and analyses how these technologies are being portrayed within news and industry media. It offers a categorisation of leading applications of AI and ADM: monitoring and tracking, data management and analysis, cloud computing, and robotics. Discussing how AI and ADM are depicted in relation to health and care practices, it examines the sense of promise that is enlivened in these representations. The article concludes by considering the implications of promissory discourses for how technologies are understood and integrated into practices and sites of healthcare.}, } @article {pmid39311198, year = {2024}, author = {Vitorino, R}, title = {Transforming Clinical Research: The Power of High-Throughput Omics Integration.}, journal = {Proteomes}, volume = {12}, number = {3}, pages = {}, pmid = {39311198}, issn = {2227-7382}, abstract = {High-throughput omics technologies have dramatically changed biological research, providing unprecedented insights into the complexity of living systems. This review presents a comprehensive examination of the current landscape of high-throughput omics pipelines, covering key technologies, data integration techniques and their diverse applications. It examines advances in next-generation sequencing, mass spectrometry and microarray platforms and highlights their contribution to data volume and precision. In addition, it considers the critical role of bioinformatics tools and statistical methods in managing the large datasets generated by these technologies. By integrating multi-omics data, researchers can gain a holistic understanding of biological systems, leading to the identification of new biomarkers and therapeutic targets, particularly in complex diseases such as cancer. The review also discusses the integration of omics data into electronic health records (EHRs) and the potential for cloud computing and big data analytics to improve data storage, analysis and sharing. Despite significant advances, there are still challenges such as data complexity, technical limitations and ethical issues.
Future directions include the development of more sophisticated computational tools and the application of advanced machine learning techniques, which are critical for addressing the complexity and heterogeneity of omics datasets. This review aims to serve as a valuable resource for researchers and practitioners, highlighting the transformative potential of high-throughput omics technologies in advancing personalized medicine and improving clinical outcomes.}, } @article {pmid39309907, year = {2024}, author = {Alruwaili, O and Tanveer, M and Alotaibi, FM and Abdelfattah, W and Armghan, A and Alserhani, FM}, title = {Securing the IoT-enabled smart healthcare system: A PUF-based resource-efficient authentication mechanism.}, journal = {Heliyon}, volume = {10}, number = {18}, pages = {e37577}, pmid = {39309907}, issn = {2405-8440}, abstract = {As the Internet of Things (IoT) continues its rapid expansion, cloud computing has become integral to various smart healthcare applications. However, the proliferation of digital health services raises significant concerns regarding security and data privacy, making the protection of sensitive medical information paramount. To effectively tackle these challenges, it is crucial to establish resilient network infrastructure and data storage systems capable of defending against malicious entities and permitting access exclusively to authorized users. This requires the deployment of a robust authentication mechanism, wherein medical IoT devices, users (such as doctors or nurses), and servers undergo registration with a trusted authority. The process entails users retrieving data from the cloud server, while IoT devices collect patient data. Before granting access to data retrieval or storage, the cloud server verifies the authenticity of both the user and the IoT device, ensuring secure and authorized interactions within the system. With millions of interconnected smart medical IoT devices autonomously gathering and analyzing vital patient data, the importance of robust security measures becomes increasingly evident. Standard security protocols are fundamental in fortifying smart healthcare applications against potential threats. To confront these issues, this paper introduces a secure and resource-efficient cloud-enabled authentication mechanism. Through empirical analysis, it is demonstrated that our authentication mechanism effectively reduces computational and communication overheads, thereby improving overall system efficiency. Furthermore, both informal and formal analyses affirm the mechanism's resilience against potential cyberattacks, highlighting its effectiveness in safeguarding smart healthcare applications.}, } @article {pmid39307708, year = {2024}, author = {Mai, KT and Liu, XT and Lin, XY and Liu, SY and Zhao, CK and Du, JB}, title = {[Progress in application of machine learning in epidemiology].}, journal = {Zhonghua liu xing bing xue za zhi = Zhonghua liuxingbingxue zazhi}, volume = {45}, number = {9}, pages = {1321-1326}, doi = {10.3760/cma.j.cn112338-20240322-00148}, pmid = {39307708}, issn = {0254-6450}, support = {2021YFC2700705//National Key Research and Development Program of China/ ; 202310312014Z//Undergraduate Innovation and Entrepreneurship Training Program/ ; }, mesh = {*Machine Learning ; Humans ; China/epidemiology ; Artificial Intelligence ; Data Mining/methods ; Algorithms ; Big Data ; Epidemiology ; }, abstract = {Population based health data collection and analysis are important in epidemiological research. 
In recent years, with the rapid development of big data, the Internet and cloud computing, artificial intelligence has gradually attracted the attention of epidemiological researchers. More and more researchers are trying to use artificial intelligence algorithms for genome sequencing and medical image data mining, and for disease diagnosis, risk prediction and other tasks. Machine learning, a branch of artificial intelligence, has been widely used in epidemiological research. This paper summarizes the key fields and progress in the application of machine learning in epidemiology, reviews the development history of machine learning, analyzes the classic cases and current challenges in its application in epidemiological research, and introduces the current application scenarios and future development trends of machine learning and artificial intelligence algorithms for the better exploration of the epidemiological research value of massive medical health data in China.}, } @article {pmid39300104, year = {2024}, author = {Mangalampalli, S and Karri, GR and Ratnamani, MV and Mohanty, SN and Jabr, BA and Ali, YA and Ali, S and Abdullaeva, BS}, title = {Efficient deep reinforcement learning based task scheduler in multi cloud environment.}, journal = {Scientific reports}, volume = {14}, number = {1}, pages = {21850}, pmid = {39300104}, issn = {2045-2322}, abstract = {The task scheduling problem (TSP) is a huge challenge in the cloud computing paradigm, as the number of tasks arriving at the cloud application platform varies over time and the tasks have variable lengths and runtime capacities. These tasks may be generated from various heterogeneous resources, and their arrival at the cloud console directly affects the performance of the cloud paradigm, increasing makespan, energy consumption, and resource costs. Traditional task scheduling algorithms cannot handle these types of complex workloads in the cloud paradigm. Many authors have developed task scheduling algorithms using metaheuristic techniques and hybrid approaches, but these algorithms give near-optimal solutions, and TSP remains a highly challenging and dynamic scenario, as it is an NP-hard problem. Therefore, to tackle the TSP in the cloud computing paradigm and schedule tasks effectively, we formulated an Adaptive Task Scheduler that segments all tasks arriving at the cloud console into subtasks and feeds them to a scheduler modeled by an Improved Asynchronous Advantage Actor-Critic algorithm (IA3C) to generate schedules. This scheduling process is carried out in two stages. In the first stage, all incoming tasks are segmented into subtasks. After segmentation, the subtasks are grouped according to their size, execution time, and communication time, and fed to the (ATSIA3C) scheduler. In the second stage, the scheduler checks the above-said constraints and dispatches the subtasks to VMs with suitable processing capacity residing in datacenters. The proposed ATSIA3C is simulated on CloudSim. Extensive simulations are conducted using both fabricated worklogs and real-time supercomputing worklogs. Our proposed mechanism is evaluated against baseline algorithms, i.e. RATS-HM, AINN-BPSO, MOABCQ. From the results, it is evident that our proposed ATSIA3C outperforms existing task schedulers by improving makespan by 70.49%. Resource cost is improved by 77.42%.
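For readers unfamiliar with the makespan objective these percentages refer to, the toy greedy baseline below makes it concrete: assign the longest task to the currently least-loaded VM and report the latest finish time. The paper's ATSIA3C learns a far richer placement policy with IA3C; the task lengths and VM speeds here are invented.

```python
# Sketch: a longest-processing-time (LPT) greedy baseline that makes the
# makespan objective concrete. This is only the naive reference point
# learned schedulers are measured against; numbers are invented.
import heapq

def lpt_makespan(task_lengths, vm_speeds):
    """Assign tasks (instruction counts) to VMs (instructions/sec),
    longest task first onto the currently least-loaded VM."""
    loads = [(0.0, i) for i in range(len(vm_speeds))]  # (finish_time, vm index)
    heapq.heapify(loads)
    for length in sorted(task_lengths, reverse=True):
        finish, vm = heapq.heappop(loads)
        heapq.heappush(loads, (finish + length / vm_speeds[vm], vm))
    return max(t for t, _ in loads)  # makespan = latest VM finish time

print(lpt_makespan(task_lengths=[900, 400, 700, 150, 300],
                   vm_speeds=[1000, 500]))  # MIPS-like units
```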
Energy consumption is improved by 74.24% over the compared algorithms in a multi-cloud environment by the proposed ATSIA3C.}, } @article {pmid39296026, year = {2024}, author = {Azizi, MA and Niknam, T and Dehghani, M and Jokar, H}, title = {Cloud-fog architecture-based control of smart island microgrid in master-slave organization using disturbance observer-based hybrid backstepping sliding mode controller.}, journal = {Heliyon}, volume = {10}, number = {17}, pages = {e37453}, pmid = {39296026}, issn = {2405-8440}, abstract = {Distributed control is an effective method to coordinate a microgrid with various components. In a smart microgrid, communication graph layouts are essential, since changing the topology unexpectedly could disrupt the operation of the distributed controllers and create an imbalance between production and load. Hence, reducing the data exchanged between units and the system operator is essential to lower the transmitted data volume and computational burden. For this purpose, an islanded microgrid with multiple agents that uses cloud-fog computing is proposed here, in order to reduce the computing burden on the central control unit as well as the data exchange among units. To balance the production power and loads in a smart island with stable voltage/frequency, a hybrid backstepping sliding mode controller (BSMC) with a disturbance observer (DO) is suggested to control voltage/frequency and current in the MG-based master-slave organization. Therefore, this paper proposes a DO-driven BSMC for controlling the voltage/frequency and power of energy sources within a master-slave organization; in addition, the study proposes cloud-fog computing for enhancing performance, reducing the transferred data volume, and processing information on time. In extensive simulations, the suggested controller shows a reduction in steady-state error, a fast response, and a total harmonic distortion (THD) below 0.33 % for both nonlinear and linear loads. The fog layer serves as a local processing level, so it reduces the exchanged data between cloud and fog nodes.}, } @article {pmid39295428, year = {2024}, author = {Badorrek, S and Franklin, J and McBride, KA and Conway, L and Williams, K}, title = {Primary care practitioner and patient perspectives on care following bariatric surgery: A meta-synthesis of qualitative research.}, journal = {Obesity reviews : an official journal of the International Association for the Study of Obesity}, volume = {25}, number = {12}, pages = {e13829}, doi = {10.1111/obr.13829}, pmid = {39295428}, issn = {1467-789X}, support = {//University of Sydney/ ; }, mesh = {Humans ; *Bariatric Surgery ; *Qualitative Research ; *Primary Health Care ; }, abstract = {Primary care is central to ongoing health care following bariatric surgery, and patients indicate a preference for receiving follow-up support from their primary care practitioner (PCP). This meta-synthesis investigates the perspectives of both PCPs and patients in post-bariatric surgery care provided by PCPs. The aim was to synthesize themes from qualitative research to recommend improvements in post-bariatric surgery clinical care in primary care settings. Systematic searches of Scopus, Medline, EMBASE, PsycINFO, the Cochrane Library, and Google Scholar resulted in the inclusion of eight papers in the meta-synthesis. Papers were critiqued using the Critical Appraisal Skills Program (CASP) and thematically coded in Quirkos Cloud.
Seven themes were reached by author consensus, including stigma and judgment; clinician barriers and facilitators; patient-related support needs; communication considerations; patient context or determinants; health care setting; and adapting to life after surgery. PCPs reported barriers including poor communication and guidance from bariatric surgery centers, limited knowledge and training in bariatric patient care, and patients who may have unrealistic outcome expectations and poor health literacy. Patients seek comprehensive care from their PCP; however, barriers hindering the provision of this care include adverse surgical outcomes, a poor relationship with their PCP, and limited and short-term follow-up care from the PCP. Insights from this meta-synthesis offer actionable recommendations for PCPs and bariatric surgery centers to enhance patient care immediately.}, } @article {pmid39281853, year = {2024}, author = {Cruz-Almeida, Y and Mehta, B and Haelterman, NA and Johnson, AJ and Heiting, C and Ernberg, M and Orange, D and Lotz, M and Boccanfuso, J and Smith, SB and Pela, M and Boline, J and Otero, M and Allen, K and Perez, D and Donnelly, C and Almarza, A and Olmer, M and Balkhi, H and Wagenaar, J and Martone, M and , }, title = {Clinical and biobehavioral phenotypic assessments and data harmonization for the RE-JOIN research consortium: Recommendations for common data element selection.}, journal = {Neurobiology of pain (Cambridge, Mass.)}, volume = {16}, number = {}, pages = {100163}, pmid = {39281853}, issn = {2452-073X}, support = {UC2 AR082196/AR/NIAMS NIH HHS/United States ; }, abstract = {BACKGROUND: The Restoring Joint Health and Function to Reduce Pain (RE-JOIN) Consortium is part of the Helping to End Addiction Long-term® (HEAL) Initiative. HEAL is an ambitious, NIH-wide initiative to speed scientific solutions to stem the national opioid public health crisis. The RE-JOIN consortium's over-arching goal is to define how chronic joint pain-mediating neurons innervate different articular and peri-articular tissues, with a focus on the knee and temporomandibular joints (TMJ), across species, employing the latest neuroscience approaches. The aim of this manuscript is to elucidate the human data gathered by the RE-JOIN consortium, as well as to expound upon its underlying rationale and the methodologies and protocols for harmonization and standardization that have been instituted by the RE-JOIN Consortium.

METHODS: The consortium-wide human models working subgroup established the RE-JOIN minimal harmonized data elements that will be collected across all human studies and set the stage to develop parallel pre-clinical data collection standards. Data harmonization considerations included requirements from the HEAL program and recommendations from the consortium's researchers and experts on informatics, knowledge management, and data curation.

RESULTS: Multidisciplinary experts - including preclinical and clinical researchers as well as clinician-scientists - developed RE-JOIN's Minimal Human Data Standard with required domains and outcome measures to be collected across projects and institutions. The RE-JOIN minimal data standard will include HEAL Common Data Elements (CDEs) (e.g., standardized demographics, general pain, psychosocial and functional measures) and RE-JOIN common data elements (R-CDE) (i.e., both general and joint-specific standardized and clinically important self-reported pain and function measures, as well as pressure pain thresholds as part of quantitative sensory testing). In addition, discretionary, site-specific measures will be collected by individual institutions (e.g., expanded quantitative sensory testing and gait biomechanical assessments), specific to the knee or TMJ. Research teams will submit datasets of standardized metadata to the RE-JOIN Data Coordinating Center (DCC) via a secure cloud-based central data repository and computing infrastructure for researchers to share and conduct analyses on data collected by or acquired for RE-JOIN. RE-JOIN datasets will have protected health information (PHI) removed and be publicly available on the SPARC portal and accessible through the HEAL Data Ecosystem.
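In practice, a harmonized minimal data standard of this kind is often enforced by validating every site's submission against one shared schema before upload to the coordinating center. The sketch below shows that pattern with the jsonschema package; the field names and bounds are invented for illustration and are not the actual RE-JOIN CDEs.

```python
# Sketch: validating a harmonized record against a shared minimal schema
# before submission. Field names are hypothetical, not RE-JOIN's CDEs.
from jsonschema import validate  # pip install jsonschema

MINIMAL_RECORD_SCHEMA = {
    "type": "object",
    "required": ["participant_id", "age", "joint", "pain_intensity_0_10"],
    "properties": {
        "participant_id": {"type": "string"},
        "age": {"type": "integer", "minimum": 18},
        "joint": {"enum": ["knee", "TMJ"]},
        "pain_intensity_0_10": {"type": "number", "minimum": 0, "maximum": 10},
        "pressure_pain_threshold_kpa": {"type": "number"},  # optional QST field
    },
}

record = {"participant_id": "site3-0017", "age": 54,
          "joint": "knee", "pain_intensity_0_10": 6.5}
validate(instance=record, schema=MINIMAL_RECORD_SCHEMA)  # raises if invalid
```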

CONCLUSION: Data harmonization efforts provide the multidisciplinary consortium with an opportunity to effectively collaborate across decentralized research teams, and data standardization sets the framework for efficient future analyses of RE-JOIN data collected by the consortium. The harmonized phenotypic information obtained will significantly enhance our understanding of the neurobiology of pain-pathology relationships in humans, providing valuable insights for comparison with pre-clinical models.}, } @article {pmid39278954, year = {2024}, author = {Manogaran, N and Nandagopal, M and Abi, NE and Seerangan, K and Balusamy, B and Selvarajan, S}, title = {Integrating meta-heuristic with named data networking for secure edge computing in IoT enabled healthcare monitoring system.}, journal = {Scientific reports}, volume = {14}, number = {1}, pages = {21532}, pmid = {39278954}, issn = {2045-2322}, mesh = {*Internet of Things ; *Computer Security ; Humans ; *Cloud Computing ; Heuristics ; Algorithms ; Delivery of Health Care ; Computer Communication Networks ; }, abstract = {With the advancement of technology, the Internet of Things (IoT) is taking on the crucial task of enabling remote medical care observation, where effective and secure healthcare information retrieval is complex. However, IoT systems have restricted resources, so attaining effective and secure healthcare information acquisition is difficult. The idea of smart healthcare has developed in diverse regions, where small-scale implementations of medical facilities are evaluated. For IoT-aided medical devices, the security of the IoT systems and related information is highly essential; edge computing, in turn, is a significant framework that rectifies their processing and computational issues. Edge computing is inexpensive and offers low-latency information assistance by enhancing the computation and transmission speed of IoT systems in the medical sector. The main intention of this work is to design a secure framework for edge computing in IoT-enabled healthcare systems using heuristic-based authentication and "Named Data Networking (NDN)". There are three layers in the proposed model. In the first layer, many IoT devices are connected together, and using cluster head formation, the patients transmit their data to the edge cloud layer. The edge cloud layer is responsible for storage and computing resources for rapidly caching and providing medical data. In the patient layer, a new heuristic-based sanitization algorithm called Revised Position of Cat Swarm Optimization (RPCSO) with NDN is used for hiding the sensitive data that should not be leaked to unauthorized users. This authentication procedure is adopted as a multi-objective key generation procedure considering constraints like hiding failure rate, information preservation rate, and degree of modification. Further, the data from the edge cloud layer is transferred to the user layer, where optimal key generation with NDN-based restoration is adopted, thus achieving efficient and secure medical data retrieval. The framework is evaluated quantitatively on diverse healthcare datasets from the University of California (UCI) and Kaggle repositories, and experimental analysis shows the superior performance of the proposed model in terms of latency and cost when compared to existing solutions.
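The multi-objective key-generation idea above can be made concrete with a toy scoring function that folds the three named constraints into a single objective for a metaheuristic to minimize. The data model and weights below are placeholders, not the RPCSO formulation.

```python
# Sketch: scoring a candidate sanitization "key" (here simply the set of
# cell indices to mask) against the three constraints named above --
# hiding failure rate, information preservation rate, and degree of
# modification. Weights and the toy data model are placeholders.
def fitness(mask_indices, values, sensitive,
            w_hide=0.4, w_preserve=0.4, w_modify=0.2):
    n = len(values)
    hidden = set(mask_indices)
    n_sensitive = sum(sensitive)
    # hiding failure rate: sensitive cells left visible
    hiding_failure = sum(1 for i in range(n)
                         if sensitive[i] and i not in hidden) / max(n_sensitive, 1)
    # information preservation rate: non-sensitive cells left untouched
    n_plain = n - n_sensitive
    preservation = sum(1 for i in range(n)
                       if not sensitive[i] and i not in hidden) / max(n_plain, 1)
    # degree of modification: fraction of all cells changed
    modification = len(hidden) / n
    # lower is better; preservation enters with a negative sign
    return w_hide * hiding_failure + w_modify * modification - w_preserve * preservation

values = [98, 120, 74, 101]            # toy vitals
sensitive = [True, False, True, False]
print(fitness({0, 2}, values, sensitive))  # masks exactly the sensitive cells
# A swarm metaheuristic (CSO-style, per the paper) would search mask space
# for the minimum of this objective.
```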
The proposed model is compared against existing algorithms such as Cat Swarm Optimization (CSO), the Osprey Optimization Algorithm (OOA), Mexican Axolotl Optimization (MAO), and the Single Candidate Optimizer (SCO). Similarly, cryptography methods such as Rivest-Shamir-Adleman (RSA), the Advanced Encryption Standard (AES), Elliptic Curve Cryptography (ECC), and Data Sanitization and Restoration (DSR) are applied and compared with the RPCSO in the proposed work. The results of the proposed model are compared on the basis of the best, worst, mean, median and standard deviation. The proposed RPCSO outperforms all other models, with values of 0.018069361, 0.50564046, 0.112643119, 0.018069361, 0.156968355 and 0.283597992, 0.467442652, 0.32920734, 0.328581887, 0.063687386 for dataset 1 and dataset 2, respectively.}, } @article {pmid39275461, year = {2024}, author = {Alharthi, S and Alshamsi, A and Alseiari, A and Alwarafy, A}, title = {Auto-Scaling Techniques in Cloud Computing: Issues and Research Directions.}, journal = {Sensors (Basel, Switzerland)}, volume = {24}, number = {17}, pages = {}, pmid = {39275461}, issn = {1424-8220}, support = {12T047//United Arab Emirates University/ ; }, abstract = {In the dynamic world of cloud computing, auto-scaling stands as a beacon of efficiency, dynamically aligning resources with fluctuating demands. This paper presents a comprehensive review of auto-scaling techniques, highlighting significant advancements and persisting challenges in the field. First, we overview the fundamental principles and mechanisms of auto-scaling, including its role in improving cost efficiency, performance, and energy consumption in cloud services. We then discuss various strategies employed in auto-scaling, ranging from threshold-based rules and queuing theory to sophisticated machine learning and time series analysis approaches. After that, we explore the critical issues in auto-scaling practices and review several studies that demonstrate how these challenges can be addressed.
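As a reference point for the strategies surveyed above, the simplest of them, a threshold-based rule, fits in a few lines: scale out above an upper utilization bound, scale in below a lower one, with a cooldown against oscillation. All thresholds, bounds, and monitoring samples below are illustrative.

```python
# Sketch: the classic threshold-based auto-scaling rule the review lists
# as a baseline strategy. Thresholds and samples are illustrative.
import time

UPPER, LOWER = 0.75, 0.30          # CPU utilization bounds
COOLDOWN_S = 120                   # seconds between scaling actions
MIN_REPLICAS, MAX_REPLICAS = 2, 20

def decide(avg_cpu, replicas, last_action_ts, now=None):
    now = time.time() if now is None else now
    if now - last_action_ts < COOLDOWN_S:
        return replicas, last_action_ts        # still cooling down
    if avg_cpu > UPPER and replicas < MAX_REPLICAS:
        return replicas + 1, now               # scale out
    if avg_cpu < LOWER and replicas > MIN_REPLICAS:
        return replicas - 1, now               # scale in
    return replicas, last_action_ts            # hold steady

replicas, last_ts = 3, 0.0
for cpu in [0.82, 0.78, 0.40, 0.22]:           # fake monitoring samples
    replicas, last_ts = decide(cpu, replicas, last_ts)
print(replicas)
```

The cooldown is what keeps this rule from thrashing when utilization hovers near a threshold, which is the main failure mode the more sophisticated predictive approaches in the review aim to avoid.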
We then conclude by offering insights into several promising research directions, emphasizing the development of predictive scaling mechanisms and the integration of advanced machine learning techniques to achieve more effective and efficient auto-scaling solutions.}, } @article {pmid39269931, year = {2024}, author = {Coleman, JR and Baker, JN and Ketkar, S and Butler, AM and Williams, L and Hammonds-Odie, L and Atkinson, EG and Murray, DD and Lee, B and Worley, KC}, title = {Development and evaluation of a training curriculum to engage researchers on accessing and analyzing the All of Us data.}, journal = {Journal of the American Medical Informatics Association : JAMIA}, volume = {31}, number = {12}, pages = {2857-2868}, pmid = {39269931}, issn = {1527-974X}, support = {1 OT2 OD026549//Office of the Director: Regional Medical Centers/ ; 5 U2C OD023196//Data and Research Center/ ; U2C OD023196/OD/NIH HHS/United States ; 1 U24 OD023163//Participant Technology Systems Center/ ; OT2 OD026555/OD/NIH HHS/United States ; 3 OT2 OD023205//Communications and Engagement/ ; HHSN 263201600085U//Federally Qualified Health Centers/ ; OT2 OD031932/OD/NIH HHS/United States ; }, mesh = {*Curriculum ; Research Personnel/education ; Humans ; United States ; }, abstract = {OBJECTIVE: The All of Us Evenings with Genetics (EwG) Research Program at Baylor College of Medicine (BCM), funded to engage research scholars to work with the All of Us data, developed a training curriculum for the Researcher Workbench, the platform to access and analyze All of Us data. All of Us EwG developed the curriculum so that it could teach scholars regardless of their skills and background in programming languages and cloud computing. All of Us EwG delivered this curriculum at the first annual All of Us EwG Faculty Summit in May 2022. The curriculum was evaluated both during and after the Faculty Summit so that it could be improved for future training.

MATERIALS AND METHODS: Surveys were administered to assess scholars' familiarity with the programming languages and computational tools required to use the Researcher Workbench. The curriculum was developed using backward design and was informed by the survey results, a review of available resources for training users on the Researcher Workbench, and All of Us EwG members' collective experience training students. The curriculum was evaluated using feedback surveys during the Faculty Summit as well as virtual meetings and emails following the Faculty Summit.

RESULTS: The evaluation results demonstrated the success of the curriculum and identified areas for improvement.

DISCUSSION AND CONCLUSION: The curriculum has been adapted and improved in response to evaluations and in response to changes to the All of Us data and infrastructure to train more researchers through this program and other scholarly programs.}, } @article {pmid39268148, year = {2024}, author = {Rorden, C and Webster, M and Drake, C and Jenkinson, M and Clayden, JD and Li, N and Hanayik, T}, title = {niimath and fslmaths: replication as a method to enhance popular neuroimaging tools.}, journal = {Aperture neuro}, volume = {4}, number = {}, pages = {}, pmid = {39268148}, issn = {2957-3963}, support = {P50 DC014664/DC/NIDCD NIH HHS/United States ; RF1 MH133701/MH/NIMH NIH HHS/United States ; }, abstract = {Neuroimaging involves the acquisition of extensive 3D images and 4D time series data to gain insights into brain structure and function. The analysis of such data necessitates both spatial and temporal processing. In this context, "fslmaths" has established itself as a foundational software tool within our field, facilitating domain-specific image processing. Here, we introduce "niimath," a clone of fslmaths. While the term "clone" often carries negative connotations, we illustrate the merits of replicating widely-used tools, touching on aspects of licensing, performance optimization, and portability. For instance, our work enables the popular functions of fslmaths to be disseminated in various forms, such as a high-performance compiled R package known as "imbibe", a Windows executable, and a WebAssembly plugin compatible with JavaScript. This versatility is demonstrated through our NiiVue live demo web page. This application allows 'edge computing' where image processing can be done with a zero-footprint tool that runs on any web device without requiring private data to be shared to the cloud. Furthermore, our efforts have contributed back to FSL, which has integrated the optimizations that we've developed. This synergy has enhanced the overall transparency, utility and efficiency of tools widely relied upon in the neuroimaging community.}, } @article {pmid39266450, year = {2024}, author = {Gnimpieba, EZ and Hartman, TW and Do, T and Zylla, J and Aryal, S and Haas, SJ and Agany, DDM and Gurung, BDS and Doe, V and Yosufzai, Z and Pan, D and Campbell, R and Huber, VC and Sani, R and Gadhamshetty, V and Lushbough, C}, title = {Biofilm marker discovery with cloud-based dockerized metagenomics analysis of microbial communities.}, journal = {Briefings in bioinformatics}, volume = {25}, number = {Supplement_1}, pages = {}, pmid = {39266450}, issn = {1477-4054}, support = {#1849206//National Science Foundation/ ; //Institutional Development Award/ ; /GM/NIGMS NIH HHS/United States ; P20GM103443/NH/NIH HHS/United States ; }, mesh = {*Biofilms/growth & development ; *Metagenomics/methods ; Microbiota/genetics ; Cloud Computing ; Humans ; Computational Biology/methods ; }, abstract = {In an environment, microbes often work in communities to achieve most of their essential functions, including the production of essential nutrients. Microbial biofilms are communities of microbes that attach to a nonliving or living surface by embedding themselves into a self-secreted matrix of extracellular polymeric substances. These communities work together to enhance their colonization of surfaces, produce essential nutrients, and achieve their essential functions for growth and survival. They often consist of diverse microbes including bacteria, viruses, and fungi. 
Biofilms play a critical role in influencing plant phenotypes and human microbial infections. Understanding how these biofilms impact plant health, human health, and the environment is important for analyzing genotype-phenotype-driven rule-of-life functions. Such fundamental knowledge can be used to precisely control the growth of biofilms on a given surface. Metagenomics is a powerful tool for analyzing biofilm genomes through function-based gene and protein sequence identification (functional metagenomics) and sequence-based function identification (sequence metagenomics). Metagenomic sequencing enables a comprehensive sampling of all genes in all organisms present within a biofilm sample. However, the complexity of biofilm metagenomic study underscores the increasing need to follow the Findability, Accessibility, Interoperability, and Reusability (FAIR) Guiding Principles for scientific data management. This will ensure that scientific findings can be more easily validated by the research community. This study proposes a dockerized, self-learning bioinformatics workflow to increase the community adoption of metagenomics toolkits in a metagenomics and meta-transcriptomics investigation. Our biofilm metagenomics workflow self-learning module includes integrated learning resources with an interactive dockerized workflow. This module will allow learners to analyze resources that are beneficial for aggregating knowledge about biofilm marker genes, proteins, and metabolic pathways as they define the composition of specific microbial communities. Cloud and dockerized technology can allow novice learners-even those with minimal knowledge in computer science-to use complicated bioinformatics tools. Our cloud-based, dockerized workflow splits biofilm microbiome metagenomics analyses into four easy-to-follow submodules. A variety of tools are built into each submodule. As students navigate these submodules, they learn about each tool used to accomplish the task. The downstream analysis is conducted using processed data obtained from online resources or raw data processed via Nextflow pipelines. This analysis takes place within Vertex AI's Jupyter notebook instance with R and Python kernels. Subsequently, results are stored and visualized in Google Cloud storage buckets, alleviating the computational burden on local resources. The result is a comprehensive tutorial that guides bioinformaticians of any skill level through the entire workflow. It enables them to comprehend and implement the necessary processes involved in this integrated workflow from start to finish. This manuscript describes the development of a resource module that is part of a learning platform named "NIGMS Sandbox for Cloud-based Learning" https://github.com/NIGMS/NIGMS-Sandbox. The overall genesis of the Sandbox is described in the editorial NIGMS Sandbox [1] at the beginning of this Supplement.
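As a small concrete touchpoint from the workflow above, the storage step (pushing notebook outputs to a Google Cloud Storage bucket so downstream submodules can retrieve them) might look like the following sketch; the bucket and object names are invented.

```python
# Sketch: uploading a workflow output from a notebook to a GCS bucket.
# Bucket and object names are invented for illustration.
from google.cloud import storage  # pip install google-cloud-storage

def upload_result(bucket_name: str, local_path: str, blob_name: str) -> str:
    client = storage.Client()                  # uses ambient credentials
    blob = client.bucket(bucket_name).blob(blob_name)
    blob.upload_from_filename(local_path)
    return f"gs://{bucket_name}/{blob_name}"

print(upload_result("biofilm-metagenomics-demo",
                    "results/marker_gene_counts.tsv",
                    "submodule3/marker_gene_counts.tsv"))
```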
This module delivers learning materials on cloud-based metagenomics analysis of microbial communities in an interactive format that uses appropriate cloud resources for data access and analyses.}, } @article {pmid39261726, year = {2024}, author = {Sharma, D and Rath, SP and Kundu, B and Korkmaz, A and S, H and Thompson, D and Bhat, N and Goswami, S and Williams, RS and Goswami, S}, title = {Linear symmetric self-selecting 14-bit kinetic molecular memristors.}, journal = {Nature}, volume = {633}, number = {8030}, pages = {560-566}, pmid = {39261726}, issn = {1476-4687}, mesh = {*Neural Networks, Computer ; Kinetics ; Artificial Intelligence ; Signal-To-Noise Ratio ; Ligands ; Thermodynamics ; Fourier Analysis ; Signal Processing, Computer-Assisted/instrumentation ; }, abstract = {Artificial Intelligence (AI) is the domain of large resource-intensive data centres that limit access to a small community of developers[1,2]. Neuromorphic hardware promises greatly improved space and energy efficiency for AI but is presently only capable of low-accuracy operations, such as inferencing in neural networks[3-5]. Core computing tasks of signal processing, neural network training and natural language processing demand far higher computing resolution, beyond that of individual neuromorphic circuit elements[6-8]. Here we introduce an analog molecular memristor based on a Ru-complex of an azo-aromatic ligand with 14-bit resolution. Precise kinetic control over a transition between two thermodynamically stable molecular electronic states facilitates 16,520 distinct analog conductance levels, which can be linearly and symmetrically updated or written individually in one time step, substantially simplifying the weight update procedure over existing neuromorphic platforms[3]. The circuit elements are unidirectional, facilitating a selector-less 64 × 64 crossbar-based dot-product engine that enables vector-matrix multiplication, including Fourier transform, in a single time step. We achieved more than 73 dB signal-to-noise-ratio, four orders of magnitude improvement over the state-of-the-art methods[9-11], while consuming 460× less energy than digital computers[12,13]. Accelerators leveraging these molecular crossbars could transform neuromorphic computing, extending it beyond niche applications and augmenting the core of digital electronics from the cloud to the edge[12,13].}, } @article {pmid39261468, year = {2024}, author = {Saad, F and Burnim, J and Carroll, C and Patton, B and Köster, U and A Saurous, R and Hoffman, M}, title = {Scalable spatiotemporal prediction with Bayesian neural fields.}, journal = {Nature communications}, volume = {15}, number = {1}, pages = {7942}, pmid = {39261468}, issn = {2041-1723}, abstract = {Spatiotemporal datasets, which consist of spatially-referenced time series, are ubiquitous in diverse applications, such as air pollution monitoring, disease tracking, and cloud-demand forecasting. As the scale of modern datasets increases, there is a growing need for statistical methods that are flexible enough to capture complex spatiotemporal dynamics and scalable enough to handle many observations. This article introduces the Bayesian Neural Field (BAYESNF), a domain-general statistical model that infers rich spatiotemporal probability distributions for data-analysis tasks including forecasting, interpolation, and variography.
BAYESNF integrates a deep neural network architecture for high-capacity function estimation with hierarchical Bayesian inference for robust predictive uncertainty quantification. Evaluations against prominent baselines show that BAYESNF delivers improvements on prediction problems from climate and public health data containing tens to hundreds of thousands of measurements. Accompanying the paper is an open-source software package (https://github.com/google/bayesnf) that runs on GPU and TPU accelerators through the JAX machine learning platform.}, } @article {pmid39258203, year = {2024}, author = {Sun, W and Tohirovich Dedahanov, A and Li, WP and Young Shin, H}, title = {Sanctions and opportunities: Factors affecting China's high-tech SMEs adoption of artificial intelligence computing leasing business.}, journal = {Heliyon}, volume = {10}, number = {16}, pages = {e36620}, pmid = {39258203}, issn = {2405-8440}, abstract = {Due to sanctions, more Chinese high-tech SMEs are turning to renting AI computing power through cloud service providers. It is therefore necessary to offer a variety of suggestions for China's high-tech SMEs to better develop AI applications through computing power leasing. Because traditional theories struggle to explain this new technology adoption behavior, this research combines and extends the TTF and UTAUT2 theories in an empirical study. A total of 387 questionnaires were received; after excluding incomplete and invalid questionnaires, 281 valid questionnaires remained. The results indicate that SME innovativeness, perceived risk, performance expectancy, price value and task-technology fit are all significantly related to usage, whereas task-technology fit significantly moderates the other relationships. The results offer a variety of suggestions for China's high-tech SMEs to better develop AI applications through computing power leasing in the context of sanctions. This study not only suggests ways to increase the competitiveness of SMEs by optimizing leasing services but also gives direction to investors' investment decisions. The findings are also applicable to the large-scale application of China's domestic AI chips in computing power leasing scenarios in the future.}, } @article {pmid39253244, year = {2024}, author = {Wang, H and Zhang, Y and Wang, XA and Yang, X}, title = {An improved identity-based public audit protocol for cloud storage.}, journal = {Heliyon}, volume = {10}, number = {16}, pages = {e36273}, doi = {10.1016/j.heliyon.2024.e36273}, pmid = {39253244}, issn = {2405-8440}, abstract = {With the rapid development of informatization, a vast amount of data is continuously generated and accumulated, leading to the emergence of cloud storage services. However, data stored in the cloud is beyond the control of users, posing various security risks. Cloud data auditing technology enables the inspection of data integrity in the cloud without the necessity of downloading the data. Among such schemes, public auditing schemes have developed rapidly because they avoid additional user auditing expenses. However, malicious third-party auditors can compromise data privacy. This paper proposes an improved identity-based cloud auditing scheme that can resist malicious auditors. Our construction builds on an existing identity-based public auditing scheme that uses blockchain to prevent malicious auditing.
We found that the original scheme is not secure, because a malicious cloud server can forge authentication tags for outsourced data blocks, whereas our scheme does not have these security flaws. Through security proofs and performance analysis, we further demonstrate that our scheme is secure and efficient. Additionally, our scheme has typical application scenarios.}, } @article {pmid39253170, year = {2024}, author = {Mingwei, YU and Feng, LI and Yonggang, GUO and Libin, SU and Deshun, QIN}, title = {Study of the patterns of variations in ice lakes and the factors influencing these changes on the southeastern Tibetan plateau.}, journal = {Heliyon}, volume = {10}, number = {16}, pages = {e36406}, pmid = {39253170}, issn = {2405-8440}, abstract = {The ice lakes in the southeastern Qinghai-Tibet Plateau have exhibited a pronounced expansion against the backdrop of global warming, consequently amplifying the local risk of ice lake outburst disasters. However, surveys of ice lake changes in the entire region have consistently been incomplete due to the prevalent high cloud density. On the basis of Landsat remote sensing images and the Google Earth Engine (GEE) cloud computing platform, in this study a fully convolutional segmentation algorithm is utilized to accurately and comprehensively map the regional distribution of ice lakes in southeastern Tibet at consistent time intervals in 1993, 2008, and 2023. Furthermore, the formation, distribution, and dynamic changes in these ice lakes are investigated. The numbers of ice lakes discovered in 1993, 2008, and 2023 were 2520, 3198, and 3877, respectively. These lakes covered areas of approximately 337.64 ± 36.86 km[2], 363.92 ± 40.90 km[2], and 395.74 ± 22.72 km[2], respectively. These ice lakes are located primarily between altitudes of 4442 m and 4909 m. The total area experienced an annual growth rate of approximately 0.57 % from 1993 to 2023. In the present study, the long-term variations in ice lakes in each district and county are examined. These findings indicate that between 1993 and 2023, the expansion of ice lakes was more pronounced in regions with a large number of marine glaciers. Notably, Basu County presented the highest annual growth rate of the ice lake population, at 6.23 %, followed by Bomi County, at 4.28 %, and finally, Zayul County, at 2.94 %. The accelerated shrinkage of marine glaciers induced by global warming is the primary driver behind the expansion of ice lakes.
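For orientation, a GEE query of the kind that underlies such mapping studies is sketched below: build a low-cloud Landsat 8 surface-reflectance composite over a region and threshold a water index. The collection ID and band names are real GEE assets, but the region, year, cloud filter, and NDWI threshold are illustrative, and thresholding is only a crude stand-in for the paper's fully convolutional segmentation.

```python
# Sketch: a low-cloud Landsat 8 composite and a simple water-index mask
# on Google Earth Engine. Assumes prior ee.Authenticate(); region, year,
# and threshold are illustrative, not the paper's parameters.
import ee

ee.Initialize()
region = ee.Geometry.Rectangle([94.0, 29.0, 97.0, 31.0])  # approx. SE Tibet

composite = (ee.ImageCollection("LANDSAT/LC08/C02/T1_L2")
             .filterBounds(region)
             .filterDate("2023-01-01", "2023-12-31")
             .filter(ee.Filter.lt("CLOUD_COVER", 20))
             .median())

# NDWI = (green - NIR) / (green + NIR); water, including ice lakes, is positive
ndwi = composite.normalizedDifference(["SR_B3", "SR_B5"])
water_mask = ndwi.gt(0.1).selfMask()
```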
The results obtained from this research will enhance our overall understanding of the complex dynamics and mechanisms that govern the formation of ice lakes while also offering valuable perspectives on the potential risks linked to their expansion in this particular area.}, } @article {pmid39252255, year = {2024}, author = {Zhao, H and Zhang, Z and Tang, J}, title = {Enhancing rural healthcare through internet-based remote collaborative outpatient services: A comprehensive evaluation in Changzhi, Shanxi Province.}, journal = {Medicine}, volume = {103}, number = {36}, pages = {e39614}, pmid = {39252255}, issn = {1536-5964}, support = {HPYJ202202//Heping Hospital Affiliated to Changzhi Medical College Faculty Research Fund/ ; }, mesh = {Humans ; China ; *Rural Health Services/organization & administration ; *Telemedicine ; *Internet ; Male ; Female ; *Patient Satisfaction ; Adult ; Middle Aged ; Health Services Accessibility ; Ambulatory Care/methods/organization & administration ; Rural Population ; Aged ; Young Adult ; Adolescent ; }, abstract = {BACKGROUND: The advancement of digital technology, particularly telemedicine, has become crucial in improving healthcare access in rural areas. By integrating cloud computing and mHealth technologies, Internet-based Collaborative Outpatient Clinics offer a promising solution to overcome the limitations of traditional healthcare delivery in underserved communities.

METHODS: A trial was conducted in 4 counties of Changzhi City in Shanxi Province, China. The system extended to 495 rural communities and served over 5000 rural residents. Deep learning algorithms were employed to analyze medical data patterns to increase the accuracy of diagnoses and the quality of personalized treatment recommendations.

RESULTS: After the implementation of the system, there was a significant improvement in the satisfaction levels of rural residents regarding medical services; the accuracy of medical consultations increased by 30%, and the convenience of medical access improved by 50%. There was also a notable enhancement in overall health management. Satisfaction rates among healthcare professionals and rural inhabitants were over 90% and 85%, respectively, indicating that the system has had a significant positive impact on the quality of healthcare services.

CONCLUSION: The study confirms the feasibility of implementing telemedicine services in rural areas and offers evidence and an operational framework for promoting innovative healthcare models on a large scale.}, } @article {pmid39248308, year = {2024}, author = {, and Hayrapetyan, A and Tumasyan, A and Adam, W and Andrejkovic, JW and Bergauer, T and Chatterjee, S and Damanakis, K and Dragicevic, M and Hussain, PS and Jeitler, M and Krammer, N and Li, A and Liko, D and Mikulec, I and Schieck, J and Schöfbeck, R and Schwarz, D and Sonawane, M and Templ, S and Waltenberger, W and Wulz, CE and Darwish, MR and Janssen, T and Mechelen, PV and Bols, ES and D'Hondt, J and Dansana, S and De Moor, A and Delcourt, M and Faham, HE and Lowette, S and Makarenko, I and Müller, D and Sahasransu, AR and Tavernier, S and Tytgat, M and Onsem, GPV and Putte, SV and Vannerom, D and Clerbaux, B and Das, AK and De Lentdecker, G and Favart, L and Gianneios, P and Hohov, D and Jaramillo, J and Khalilzadeh, A and Khan, FA and Lee, K and Mahdavikhorrami, M and Malara, A and Paredes, S and Thomas, L and Bemden, MV and Velde, CV and Vanlaer, P and De Coen, M and Dobur, D and Hong, Y and Knolle, J and Lambrecht, L and Mestdach, G and Amarilo, KM and Rendón, C and Samalan, A and Skovpen, K and Bossche, NVD and Linden, JV and Wezenbeek, L and Benecke, A and Bethani, A and Bruno, G and Caputo, C and Delaere, C and Donertas, IS and Giammanco, A and Jaffel, K and Jain, S and Lemaitre, V and Lidrych, J and Mastrapasqua, P and Mondal, K and Tran, TT and Wertz, S and Alves, GA and Coelho, E and Hensel, C and De Oliveira, TM and Moraes, A and Teles, PR and Soeiro, M and Júnior, WLA and Pereira, MAG and Filho, MBF and Malbouisson, HB and Carvalho, W and Chinellato, J and Da Costa, EM and Da Silveira, GG and De Jesus Damiao, D and De Souza, SF and De Souza, RG and Martins, J and Herrera, CM and Mundim, L and Nogima, H and Pinheiro, JP and Santoro, A and Sznajder, A and Thiel, M and Pereira, AV and Bernardes, CA and Calligaris, L and Tomei, TRFP and Gregores, EM and Mercadante, PG and Novaes, SF and Orzari, B and Padula, SS and Aleksandrov, A and Antchev, G and Hadjiiska, R and Iaydjiev, P and Misheva, M and Shopova, M and Sultanov, G and Dimitrov, A and Litov, L and Pavlov, B and Petkov, P and Petrov, A and Shumka, E and Keshri, S and Thakur, S and Cheng, T and Javaid, T and Yuan, L and Hu, Z and Liu, J and Yi, K and Chen, GM and Chen, HS and Chen, M and Iemmi, F and Jiang, CH and Kapoor, A and Liao, H and Liu, ZA and Sharma, R and Song, JN and Tao, J and Wang, C and Wang, J and Wang, Z and Zhang, H and Agapitos, A and Ban, Y and Levin, A and Li, C and Li, Q and Mao, Y and Qian, SJ and Sun, X and Wang, D and Yang, H and Zhang, L and Zhou, C and You, Z and Lu, N and Bauer, G and Gao, X and Leggat, D and Okawa, H and Lin, Z and Lu, C and Xiao, M and Avila, C and Trujillo, DAB and Cabrera, A and Florez, C and Fraga, J and Vega, JAR and Guisao, JM and Ramirez, F and Rodriguez, M and Alvarez, JDR and Giljanovic, D and Godinovic, N and Lelas, D and Sculac, A and Kovac, M and Sculac, T and Bargassa, P and Brigljevic, V and Chitroda, BK and Ferencek, D and Mishra, S and Starodumov, A and Susa, T and Attikis, A and Christoforou, K and Konstantinou, S and Mousa, J and Nicolaou, C and Ptochos, F and Razis, PA and Rykaczewski, H and Saka, H and Stepennov, A and Finger, M and Finger, M and Kveton, A and Ayala, E and Jarrin, EC and Abdelalim, AA and Salama, E and Mahmoud, MA and Mohammed, Y and Ehataht, K and Kadastik, M and Lange, T and 
Nandan, S and Nielsen, C and Pata, J and Raidal, M and Tani, L and Veelken, C and Kirschenmann, H and Osterberg, K and Voutilainen, M and Bharthuar, S and Brücken, E and Garcia, F and Kallonen, KTS and Kinnunen, R and Lampén, T and Lassila-Perini, K and Lehti, S and Lindén, T and Martikainen, L and Myllymäki, M and Rantanen, MM and Siikonen, H and Tuominen, E and Tuominiemi, J and Luukka, P and Petrow, H and Besancon, M and Couderc, F and Dejardin, M and Denegri, D and Faure, JL and Ferri, F and Ganjour, S and Gras, P and de Monchenault, GH and Lohezic, V and Malcles, J and Rander, J and Rosowsky, A and Sahin, MÖ and Savoy-Navarro, A and Simkina, P and Titov, M and Tornago, M and Barrera, CB and Beaudette, F and Perraguin, AB and Busson, P and Cappati, A and Charlot, C and Chiusi, M and Damas, F and Davignon, O and De Wit, A and Alves, BAFS and Ghosh, S and Gilbert, A and de Cassagnac, RG and Hakimi, A and Harikrishnan, B and Kalipoliti, L and Liu, G and Motta, J and Nguyen, M and Ochando, C and Portales, L and Salerno, R and Sauvan, JB and Sirois, Y and Tarabini, A and Vernazza, E and Zabi, A and Zghiche, A and Agram, JL and Andrea, J and Apparu, D and Bloch, D and Brom, JM and Chabert, EC and Collard, C and Falke, S and Goerlach, U and Grimault, C and Haeberle, R and Bihan, AL and Meena, M and Saha, G and Sessini, MA and Hove, PV and Beauceron, S and Blancon, B and Boudoul, G and Chanon, N and Choi, J and Contardo, D and Depasse, P and Dozen, C and Mamouni, HE and Fay, J and Gascon, S and Gouzevitch, M and Greenberg, C and Grenier, G and Ille, B and Laktineh, IB and Lethuillier, M and Mirabito, L and Perries, S and Purohit, A and Donckt, MV and Verdier, P and Xiao, J and Bagaturia, I and Lomidze, I and Tsamalaidze, Z and Botta, V and Feld, L and Klein, K and Lipinski, M and Meuser, D and Pauls, A and Röwert, N and Teroerde, M and Diekmann, S and Dodonova, A and Eich, N and Eliseev, D and Engelke, F and Erdmann, J and Erdmann, M and Fackeldey, P and Fischer, B and Hebbeker, T and Hoepfner, K and Ivone, F and Jung, A and Lee, MY and Mausolf, F and Merschmeyer, M and Meyer, A and Mukherjee, S and Noll, D and Nowotny, F and Pozdnyakov, A and Rath, Y and Redjeb, W and Rehm, F and Reithler, H and Sarkar, U and Sarkisovi, V and Schmidt, A and Sharma, A and Spah, JL and Stein, A and Da Silva De Araujo, FT and Vigilante, L and Wiedenbeck, S and Zaleski, S and Dziwok, C and Flügge, G and Ahmad, WH and Kress, T and Nowack, A and Pooth, O and Stahl, A and Ziemons, T and Zotz, A and Petersen, HA and Martin, MA and Alimena, J and Amoroso, S and An, Y and Baxter, S and Bayatmakou, M and Gonzalez, HB and Behnke, O and Belvedere, A and Bhattacharya, S and Blekman, F and Borras, K and Campbell, A and Cardini, A and Cheng, C and Colombina, F and Rodríguez, SC and Silva, GC and De Silva, M and Eckerlin, G and Eckstein, D and Banos, LIE and Filatov, O and Gallo, E and Geiser, A and Giraldi, A and Guglielmi, V and Guthoff, M and Hinzmann, A and Jafari, A and Jeppe, L and Jomhari, NZ and Kaech, B and Kasemann, M and Kleinwort, C and Kogler, R and Komm, M and Krücker, D and Lange, W and Pernia, DL and Lipka, K and Lohmann, W and Mankel, R and Melzer-Pellmann, IA and Morentin, MM and Meyer, AB and Milella, G and Mussgiller, A and Nair, LP and Nürnberg, A and Otarid, Y and Park, J and Adán, DP and Ranken, E and Raspereza, A and Lopes, BR and Rübenach, J and Saggio, A and Scham, M and Schnake, S and Schütze, P and Schwanenberger, C and Selivanova, D and Sharko, K and Shchedrolosiev, M and Ricardo, RES and 
Stafford, D and Vazzoler, F and Barroso, AV and Walsh, R and Wang, Q and Wen, Y and Wichmann, K and Wiens, L and Wissing, C and Yang, Y and Santos, AZC and Albrecht, A and Albrecht, S and Antonello, M and Bein, S and Benato, L and Bollweg, S and Bonanomi, M and Connor, P and Eich, M and Morabit, KE and Fischer, Y and Garbers, C and Garutti, E and Grohsjean, A and Haller, J and Jabusch, HR and Kasieczka, G and Keicher, P and Klanner, R and Korcari, W and Kramer, T and Kutzner, V and Labe, F and Lange, J and Lobanov, A and Matthies, C and Mehta, A and Moureaux, L and Mrowietz, M and Nigamova, A and Nissan, Y and Paasch, A and Rodriguez, KJP and Quadfasel, T and Raciti, B and Rieger, M and Savoiu, D and Schindler, J and Schleper, P and Schröder, M and Schwandt, J and Sommerhalder, M and Stadie, H and Steinbrück, G and Tews, A and Wolf, M and Brommer, S and Burkart, M and Butz, E and Chwalek, T and Dierlamm, A and Droll, A and Faltermann, N and Giffels, M and Gottmann, A and Hartmann, F and Hofsaess, R and Horzela, M and Husemann, U and Kieseler, J and Klute, M and Koppenhöfer, R and Lawhorn, JM and Link, M and Lintuluoto, A and Maier, S and Mitra, S and Mormile, M and Müller, T and Neukum, M and Oh, M and Presilla, M and Quast, G and Rabbertz, K and Regnery, B and Shadskiy, N and Shvetsov, I and Simonis, HJ and Toms, M and Trevisani, N and Cube, RFV and Wassmer, M and Wieland, S and Wittig, F and Wolf, R and Zuo, X and Anagnostou, G and Daskalakis, G and Kyriakis, A and Papadopoulos, A and Stakia, A and Kontaxakis, P and Melachroinos, G and Panagiotou, A and Papavergou, I and Paraskevas, I and Saoulidou, N and Theofilatos, K and Tziaferi, E and Vellidis, K and Zisopoulos, I and Bakas, G and Chatzistavrou, T and Karapostoli, G and Kousouris, K and Papakrivopoulos, I and Siamarkou, E and Tsipolitis, G and Zacharopoulou, A and Adamidis, K and Bestintzanos, I and Evangelou, I and Foudas, C and Kamtsikis, C and Katsoulis, P and Kokkas, P and Kioseoglou, PGK and Manthos, N and Papadopoulos, I and Strologas, J and Bartók, M and Hajdu, C and Horvath, D and Márton, K and Sikler, F and Veszpremi, V and Csanád, M and Farkas, K and Gadallah, MMA and Kadlecsik, Á and Major, P and Mandal, K and Pásztor, G and Rádl, AJ and Veres, GI and Raics, P and Ujvari, B and Zilizi, G and Bencze, G and Czellar, S and Molnar, J and Szillasi, Z and Csorgo, T and Nemes, F and Novak, T and Babbar, J and Bansal, S and Beri, SB and Bhatnagar, V and Chaudhary, G and Chauhan, S and Dhingra, N and Kaur, A and Kaur, A and Kaur, H and Kaur, M and Kumar, S and Sandeep, K and Sheokand, T and Singh, JB and Singla, A and Ahmed, A and Bhardwaj, A and Chhetri, A and Choudhary, BC and Kumar, A and Kumar, A and Naimuddin, M and Ranjan, K and Saumya, S and Baradia, S and Barman, S and Bhattacharya, S and Dutta, S and Dutta, S and Sarkar, S and Ameen, MM and Behera, PK and Behera, SC and Chatterjee, S and Jana, P and Kalbhor, P and Komaragiri, JR and Kumar, D and Pujahari, PR and Saha, NR and Sharma, A and Sikdar, AK and Verma, S and Dugad, S and Kumar, M and Mohanty, GB and Suryadevara, P and Bala, A and Banerjee, S and Chatterjee, RM and Dewanjee, RK and Guchait, M and Jain, S and Jaiswal, A and Karmakar, S and Kumar, S and Majumder, G and Mazumdar, K and Parolia, S and Thachayath, A and Bahinipati, S and Kar, C and Maity, D and Mal, P and Mishra, T and Bindhu, VKMN and Naskar, K and Nayak, A and Sadangi, P and Saha, P and Swain, SK and Varghese, S and Vats, D and Acharya, S and Alpana, A and Dube, S and Gomber, B and Kansal, B and Laha, 
A and Sahu, B and Sharma, S and Vaish, KY and Bakhshiansohi, H and Khazaie, E and Zeinali, M and Chenarani, S and Etesami, SM and Khakzad, M and Najafabadi, MM and Grunewald, M and Abbrescia, M and Aly, R and Colaleo, A and Creanza, D and D'Anzi, B and De Filippis, N and De Palma, M and Florio, AD and Elmetenawee, W and Fiore, L and Iaselli, G and Louka, M and Maggi, G and Maggi, M and Margjeka, I and Mastrapasqua, V and My, S and Nuzzo, S and Pellecchia, A and Pompili, A and Pugliese, G and Radogna, R and Ramirez-Sanchez, G and Ramos, D and Ranieri, A and Silvestris, L and Simone, FM and Sözbilir, Ü and Stamerra, A and Venditti, R and Verwilligen, P and Zaza, A and Abbiendi, G and Battilana, C and Bonacorsi, D and Borgonovi, L and Campanini, R and Capiluppi, P and Castro, A and Cavallo, FR and Cuffiani, M and Dallavalle, GM and Diotalevi, T and Fanfani, A and Fasanella, D and Giacomelli, P and Giommi, L and Grandi, C and Guiducci, L and Meo, SL and Lunerti, L and Marcellini, S and Masetti, G and Navarria, FL and Perrotta, A and Primavera, F and Rossi, AM and Rovelli, T and Siroli, GP and Costa, S and Mattia, AD and Potenza, R and Tricomi, A and Tuve, C and Assiouras, P and Barbagli, G and Bardelli, G and Camaiani, B and Cassese, A and Ceccarelli, R and Ciulli, V and Civinini, C and D'Alessandro, R and Focardi, E and Kello, T and Latino, G and Lenzi, P and Lizzo, M and Meschini, M and Paoletti, S and Papanastassiou, A and Sguazzoni, G and Viliani, L and Benussi, L and Bianco, S and Meola, S and Piccolo, D and Chatagnon, P and Ferro, F and Robutti, E and Tosi, S and Benaglia, A and Boldrini, G and Brivio, F and Cetorelli, F and De Guio, F and Dinardo, ME and Dini, P and Gennai, S and Gerosa, R and Ghezzi, A and Govoni, P and Guzzi, L and Lucchini, MT and Malberti, M and Malvezzi, S and Massironi, A and Menasce, D and Moroni, L and Paganoni, M and Pedrini, D and Pinolini, BS and Ragazzi, S and de Fatis, TT and Zuolo, D and Buontempo, S and Cagnotta, A and Carnevali, F and Cavallo, N and Fabozzi, F and Iorio, AOM and Lista, L and Paolucci, P and Rossi, B and Sciacca, C and Ardino, R and Azzi, P and Bacchetta, N and Bisello, D and Bortignon, P and Bragagnolo, A and Checchia, P and Dorigo, T and Gasparini, U and Lusiani, E and Margoni, M and Marini, F and Meneguzzo, AT and Migliorini, M and Passaseo, M and Pazzini, J and Ronchese, P and Rossin, R and Sgaravatto, M and Simonetto, F and Strong, G and Tosi, M and Triossi, A and Ventura, S and Yarar, H and Zanetti, M and Zotto, P and Zucchetta, A and Zumerle, G and Zeid, SA and Aimè, C and Braghieri, A and Calzaferri, S and Fiorina, D and Montagna, P and Re, V and Riccardi, C and Salvini, P and Vai, I and Vitulo, P and Ajmal, S and Bilei, GM and Ciangottini, D and Fanò, L and Magherini, M and Mantovani, G and Mariani, V and Menichelli, M and Moscatelli, F and Rossi, A and Santocchia, A and Spiga, D and Tedeschi, T and Asenov, P and Azzurri, P and Bagliesi, G and Bhattacharya, R and Bianchini, L and Boccali, T and Bossini, E and Bruschini, D and Castaldi, R and Ciocci, MA and Cipriani, M and D'Amante, V and Dell'Orso, R and Donato, S and Giassi, A and Ligabue, F and Figueiredo, DM and Messineo, A and Musich, M and Palla, F and Rizzi, A and Rolandi, G and Chowdhury, SR and Sarkar, T and Scribano, A and Spagnolo, P and Tenchini, R and Tonelli, G and Turini, N and Venturi, A and Verdini, PG and Barria, P and Basile, C and Campana, M and Cavallari, F and Mendez, LC and Re, DD and Marco, ED and Diemoz, M and Errico, F and Longo, E and Meridiani, P and 
Mijuskovic, J and Organtini, G and Pandolfi, F and Paramatti, R and Quaranta, C and Rahatlou, S and Rovelli, C and Santanastasio, F and Soffi, L and Amapane, N and Arcidiacono, R and Argiro, S and Arneodo, M and Bartosik, N and Bellan, R and Bellora, A and Biino, C and Borca, C and Cartiglia, N and Costa, M and Covarelli, R and Demaria, N and Finco, L and Grippo, M and Kiani, B and Legger, F and Luongo, F and Mariotti, C and Markovic, L and Maselli, S and Mecca, A and Migliore, E and Monteno, M and Mulargia, R and Obertino, MM and Ortona, G and Pacher, L and Pastrone, N and Pelliccioni, M and Ruspa, M and Siviero, F and Sola, V and Solano, A and Staiano, A and Tarricone, C and Trocino, D and Umoret, G and Vlasov, E and Belforte, S and Candelise, V and Casarsa, M and Cossutti, F and De Leo, K and Ricca, GD and Dogra, S and Hong, J and Huh, C and Kim, B and Kim, DH and Kim, J and Lee, H and Lee, SW and Moon, CS and Oh, YD and Ryu, MS and Sekmen, S and Yang, YC and Kim, MS and Bak, G and Gwak, P and Kim, H and Moon, DH and Asilar, E and Kim, D and Kim, TJ and Merlin, JA and Choi, S and Han, S and Hong, B and Lee, K and Lee, KS and Lee, S and Park, J and Park, SK and Yoo, J and Goh, J and Yang, S and Kim, HS and Kim, Y and Lee, S and Almond, J and Bhyun, JH and Choi, J and Jun, W and Kim, J and Ko, S and Kwon, H and Lee, H and Lee, J and Lee, J and Oh, BH and Oh, SB and Seo, H and Yang, UK and Yoon, I and Jang, W and Kang, DY and Kang, Y and Kim, S and Ko, B and Lee, JSH and Lee, Y and Park, IC and Roh, Y and Watson, IJ and Ha, S and Yoo, HD and Choi, M and Kim, MR and Lee, H and Lee, Y and Yu, I and Beyrouthy, T and Maghrbi, Y and Dreimanis, K and Gaile, A and Pikurs, G and Potrebko, A and Seidel, M and Veckalns, V and Strautnieks, NR and Ambrozas, M and Juodagalvis, A and Rinkevicius, A and Tamulaitis, G and Norjoharuddeen, NB and Yusuff, I and Zolkapli, Z and Benitez, JF and Hernandez, AC and Acosta, HAE and Maríñez, LGG and Coello, ML and Quijada, JAM and Sehrawat, A and Palomo, LV and Ayala, G and Castilla-Valdez, H and Ledesma, HC and De La Cruz-Burelo, E and La Cruz, IH and Lopez-Fernandez, R and Herrera, CAM and Hernández, AS and Barrera, CO and García, MR and Bautista, I and Pedraza, I and Ibarguen, HAS and Estrada, CU and Bubanja, I and Raicevic, N and Butler, PH and Ahmad, A and Asghar, MI and Awais, A and Awan, MIM and Hoorani, HR and Khan, WA and Avati, V and Grzanka, L and Malawski, M and Bialkowska, H and Bluj, M and Boimska, B and Górski, M and Kazana, M and Szleper, M and Zalewski, P and Bunkowski, K and Doroba, K and Kalinowski, A and Konecki, M and Krolikowski, J and Muhammad, A and Pozniak, K and Zabolotny, W and Araujo, M and Bastos, D and Da Cruz E Silva, CB and Boletti, A and Bozzo, M and Camporesi, T and Da Molin, G and Faccioli, P and Gallinaro, M and Hollar, J and Leonardo, N and Niknejad, T and Petrilli, A and Pisano, M and Seixas, J and Varela, J and Wulff, JW and Adzic, P and Milenovic, P and Dordevic, M and Milosevic, J and Rekovic, V and Aguilar-Benitez, M and Maestre, JA and Bedoya, CF and Cepeda, M and Cerrada, M and Colino, N and De La Cruz, B and Peris, AD and Valle, AED and Val, DFD and Ramos, JPF and Flix, J and Fouz, MC and Lopez, OG and Lopez, SG and Hernandez, JM and Josa, MI and Moran, D and Perez, CMM and Tobar, ÁN and Dengra, CP and Yzquierdo, AP and Pelayo, JP and Redondo, I and Ferrero, DDR and Romero, L and Navas, SS and Gómez, LU and Escobar, JV and Willmott, C and de Trocóniz, JF and Gonzalez, BA and Cuevas, J and Menendez, JF and Folgueras, S 
and Caballero, IG and Fernández, JRG and Cortezon, EP and Álvarez, CR and Bouza, VR and Rodríguez, AS and Trapote, A and Villalba, CV and Vischia, P and Bhowmik, S and Fernández, SB and Cifuentes, JAB and Cabrillo, IJ and Calderon, A and Campderros, JD and Fernandez, M and Gomez, G and García, CL and Rivero, CM and Arbol, PMRD and Matorras, F and Cuevas, PM and Ramos, EN and Gomez, JP and Scodellaro, L and Vila, I and Garcia, JMV and Jayananda, MK and Kailasapathy, B and Sonnadara, DUJ and Wickramarathna, DDC and Dharmaratna, WGD and Liyanage, K and Perera, N and Wickramage, N and Abbaneo, D and Amendola, C and Auffray, E and Auzinger, G and Baechler, J and Barney, D and Martínez, AB and Bianco, M and Bilin, B and Anuar, AAB and Bocci, A and Botta, C and Brondolin, E and Caillol, C and Cerminara, G and Chernyavskaya, N and d'Enterria, D and Dabrowski, A and David, A and De Roeck, A and Defranchis, MM and Deile, M and Dobson, M and Forthomme, L and Franzoni, G and Funk, W and Giani, S and Gigi, D and Gill, K and Glege, F and Gouskos, L and Haranko, M and Hegeman, J and Huber, B and Innocente, V and James, T and Janot, P and Laurila, S and Lecoq, P and Leutgeb, E and Lourenço, C and Maier, B and Malgeri, L and Mannelli, M and Marini, AC and Matthewman, M and Meijers, F and Mersi, S and Meschi, E and Milosevic, V and Monti, F and Moortgat, F and Mulders, M and Neutelings, I and Orfanelli, S and Pantaleo, F and Petrucciani, G and Pfeiffer, A and Pierini, M and Piparo, D and Qu, H and Rabady, D and Gutiérrez, GR and Rovere, M and Sakulin, H and Scarfi, S and Schwick, C and Selvaggi, M and Sharma, A and Shchelina, K and Silva, P and Sphicas, P and Leiton, AGS and Steen, A and Summers, S and Treille, D and Tropea, P and Tsirou, A and Walter, D and Wanczyk, J and Wang, J and Wuchterl, S and Zehetner, P and Zejdl, P and Zeuner, WD and Bevilacqua, T and Caminada, L and Ebrahimi, A and Erdmann, W and Horisberger, R and Ingram, Q and Kaestli, HC and Kotlinski, D and Lange, C and Missiroli, M and Noehte, L and Rohe, T and Aarrestad, TK and Androsov, K and Backhaus, M and Calandri, A and Cazzaniga, C and Datta, K and De Cosa, A and Dissertori, G and Dittmar, M and Donegà, M and Eble, F and Galli, M and Gedia, K and Glessgen, F and Grab, C and Hits, D and Lustermann, W and Lyon, AM and Manzoni, RA and Marchegiani, M and Marchese, L and Perez, CM and Mascellani, A and Nessi-Tedaldi, F and Pauss, F and Perovic, V and Pigazzini, S and Reissel, C and Reitenspiess, T and Ristic, B and Riti, F and Seidita, R and Steggemann, J and Valsecchi, D and Wallny, R and Amsler, C and Bärtschi, P and Brzhechko, D and Canelli, MF and Cormier, K and Heikkilä, JK and Huwiler, M and Jin, W and Jofrehei, A and Kilminster, B and Leontsinis, S and Liechti, SP and Macchiolo, A and Meiring, P and Molinatti, U and Reimers, A and Robmann, P and Cruz, SS and Senger, M and Stäger, F and Takahashi, Y and Tramontano, R and Adloff, C and Bhowmik, D and Kuo, CM and Lin, W and Rout, PK and Tiwari, PC and Yu, SS and Ceard, L and Chao, Y and Chen, KF and Chen, PS and Chen, ZG and De Iorio, A and Hou, WS and Hsu, TH and Kao, YW and Khurana, R and Kole, G and Li, YY and Lu, RS and Paganis, E and Su, XF and Thomas-Wilsker, J and Tsai, LS and Wu, HY and Yazgan, E and Asawatangtrakuldee, C and Srimanobhas, N and Wachirapusitanand, V and Agyel, D and Boran, F and Demiroglu, ZS and Dolek, F and Dumanoglu, I and Eskut, E and Guler, Y and Guler, EG and Isik, C and Kara, O and Topaksu, AK and Kiminsu, U and Onengut, G and Ozdemir, K and Polatoz, A 
and Tali, B and Tok, UG and Turkcapar, S and Uslan, E and Zorbakir, IS and Yalvac, M and Akgun, B and Atakisi, IO and Gülmez, E and Kaya, M and Kaya, O and Tekten, S and Cakir, A and Cankocak, K and Komurcu, Y and Sen, S and Aydilek, O and Cerci, S and Epshteyn, V and Hacisahinoglu, B and Hos, I and Kaynak, B and Ozkorucuklu, S and Potok, O and Sert, H and Simsek, C and Zorbilmez, C and Isildak, B and Cerci, DS and Boyaryntsev, A and Grynyov, B and Levchuk, L and Anthony, D and Brooke, JJ and Bundock, A and Bury, F and Clement, E and Cussans, D and Flacher, H and Glowacki, M and Goldstein, J and Heath, HF and Kreczko, L and Paramesvaran, S and Robertshaw, L and Nasr-Storey, SSE and Smith, VJ and Stylianou, N and Pass, KW and White, R and Ball, AH and Bell, KW and Belyaev, A and Brew, C and Brown, RM and Cockerill, DJA and Cooke, C and Ellis, KV and Harder, K and Harper, S and Holmberg, ML and Linacre, J and Manolopoulos, K and Newbold, DM and Olaiya, E and Petyt, D and Reis, T and Salvi, G and Schuh, T and Shepherd-Themistocleous, CH and Tomalin, IR and Williams, T and Bainbridge, R and Bloch, P and Brown, CE and Buchmuller, O and Cacchio, V and Montoya, CAC and Chahal, GS and Colling, D and Dancu, JS and Das, I and Dauncey, P and Davies, G and Davies, J and Negra, MD and Fayer, S and Fedi, G and Hall, G and Hassanshahi, MH and Howard, A and Iles, G and Knight, M and Langford, J and Holgado, JL and Lyons, L and Magnan, AM and Malik, S and Mieskolainen, M and Nash, J and Pesaresi, M and Radburn-Smith, BC and Richards, A and Rose, A and Savva, K and Seez, C and Shukla, R and Tapper, A and Uchida, K and Uttley, GP and Vage, LH and Virdee, T and Vojinovic, M and Wardle, N and Winterbottom, D and Coldham, K and Cole, JE and Khan, A and Kyberd, P and Reid, ID and Abdullin, S and Brinkerhoff, A and Caraway, B and Dittmann, J and Hatakeyama, K and Hiltbrand, J and McMaster, B and Saunders, M and Sawant, S and Sutantawibul, C and Wilson, J and Bartek, R and Dominguez, A and Escamilla, CH and Simsek, AE and Uniyal, R and Hernandez, AMV and Bam, B and Chudasama, R and Cooper, SI and Gleyzer, SV and Perez, CU and Rumerio, P and Usai, E and Yi, R and Akpinar, A and Arcaro, D and Cosby, C and Demiragli, Z and Erice, C and Fangmeier, C and Madrazo, CF and Fontanesi, E and Gastler, D and Golf, F and Jeon, S and Reed, I and Rohlf, J and Salyer, K and Sperka, D and Spitzbart, D and Suarez, I and Tsatsos, A and Yuan, S and Zecchinelli, AG and Benelli, G and Coubez, X and Cutts, D and Hadley, M and Heintz, U and Hogan, JM and Kwon, T and Landsberg, G and Lau, KT and Li, D and Luo, J and Mondal, S and Narain, M and Pervan, N and Sagir, S and Simpson, F and Stamenkovic, M and Yan, X and Zhang, W and Abbott, S and Bonilla, J and Brainerd, C and Breedon, R and De La Barca Sanchez, MC and Chertok, M and Citron, M and Conway, J and Cox, PT and Erbacher, R and Jensen, F and Kukral, O and Mocellin, G and Mulhearn, M and Pellett, D and Wei, W and Yao, Y and Zhang, F and Bachtis, M and Cousins, R and Datta, A and Avila, GF and Hauser, J and Ignatenko, M and Iqbal, MA and Lam, T and Manca, E and Prado, AND and Saltzberg, D and Valuev, V and Clare, R and Gary, JW and Gordon, M and Hanson, G and Si, W and Wimpenny, S and Branson, JG and Cittolin, S and Cooperstein, S and Diaz, D and Duarte, J and Giannini, L and Guiang, J and Kansal, R and Krutelyov, V and Lee, R and Letts, J and Masciovecchio, M and Mokhtar, F and Mukherjee, S and Pieri, M and Quinnan, M and Narayanan, BVS and Sharma, V and Tadel, M and Vourliotis, E and 
Würthwein, F and Xiang, Y and Yagil, A and Barzdukas, A and Brennan, L and Campagnari, C and Incandela, J and Kim, J and Li, AJ and Masterson, P and Mei, H and Richman, J and Sarica, U and Schmitz, R and Setti, F and Sheplock, J and Stuart, D and Vámi, TÁ and Wang, S and Bornheim, A and Cerri, O and Latorre, A and Mao, J and Newman, HB and Spiropulu, M and Vlimant, JR and Wang, C and Xie, S and Zhu, RY and Alison, J and An, S and Andrews, MB and Bryant, P and Cremonesi, M and Dutta, V and Ferguson, T and Harilal, A and Liu, C and Mudholkar, T and Murthy, S and Palit, P and Paulini, M and Roberts, A and Sanchez, A and Terrill, W and Cumalat, JP and Ford, WT and Hart, A and Hassani, A and Karathanasis, G and MacDonald, E and Manganelli, N and Perloff, A and Savard, C and Schonbeck, N and Stenson, K and Ulmer, KA and Wagner, SR and Zipper, N and Alexander, J and Bright-Thonney, S and Chen, X and Cranshaw, DJ and Fan, J and Fan, X and Gadkari, D and Hogan, S and Kotamnives, P and Monroy, J and Oshiro, M and Patterson, JR and Reichert, J and Reid, M and Ryd, A and Thom, J and Wittich, P and Zou, R and Albrow, M and Alyari, M and Amram, O and Apollinari, G and Apresyan, A and Bauerdick, LAT and Berry, D and Berryhill, J and Bhat, PC and Burkett, K and Butler, JN and Canepa, A and Cerati, GB and Cheung, HWK and Chlebana, F and Cummings, G and Dickinson, J and Dutta, I and Elvira, VD and Feng, Y and Freeman, J and Gandrakota, A and Gecse, Z and Gray, L and Green, D and Grummer, A and Grünendahl, S and Guerrero, D and Gutsche, O and Harris, RM and Heller, R and Herwig, TC and Hirschauer, J and Horyn, L and Jayatilaka, B and Jindariani, S and Johnson, M and Joshi, U and Klijnsma, T and Klima, B and Kwok, KHM and Lammel, S and Lincoln, D and Lipton, R and Liu, T and Madrid, C and Maeshima, K and Mantilla, C and Mason, D and McBride, P and Merkel, P and Mrenna, S and Nahn, S and Ngadiuba, J and Noonan, D and Papadimitriou, V and Pastika, N and Pedro, K and Pena, C and Ravera, F and Hall, AR and Ristori, L and Sexton-Kennedy, E and Smith, N and Soha, A and Spiegel, L and Stoynev, S and Strait, J and Taylor, L and Tkaczyk, S and Tran, NV and Uplegger, L and Vaandering, EW and Zoi, I and Aruta, C and Avery, P and Bourilkov, D and Cadamuro, L and Chang, P and Cherepanov, V and Field, RD and Koenig, E and Kolosova, M and Konigsberg, J and Korytov, A and Matchev, K and Menendez, N and Mitselmakher, G and Mohrman, K and Madhu, AM and Rawal, N and Rosenzweig, D and Rosenzweig, S and Wang, J and Adams, T and Kadhim, AA and Askew, A and Bower, S and Habibullah, R and Hagopian, V and Hashmi, R and Kim, RS and Kim, S and Kolberg, T and Martinez, G and Prosper, H and Prova, PR and Wulansatiti, M and Yohay, R and Zhang, J and Alsufyani, B and Baarmand, MM and Butalla, S and Elkafrawy, T and Hohlmann, M and Verma, RK and Rahmani, M and Yanes, E and Adams, MR and Baty, A and Bennett, C and Cavanaugh, R and Franco, RE and Evdokimov, O and Gerber, CE and Hofman, DJ and Lee, JH and Lemos, DS and Merrit, AH and Mills, C and Nanda, S and Oh, G and Ozek, B and Pilipovic, D and Pradhan, R and Roy, T and Rudrabhatla, S and Tonjes, MB and Varelas, N and Ye, Z and Yoo, J and Alhusseini, M and Blend, D and Dilsiz, K and Emediato, L and Karaman, G and Köseyan, OK and Merlo, JP and Mestvirishvili, A and Nachtman, J and Neogi, O and Ogul, H and Onel, Y and Penzo, A and Snyder, C and Tiras, E and Blumenfeld, B and Corcodilos, L and Davis, J and Gritsan, AV and Kang, L and Kyriacou, S and Maksimovic, P and Roguljic, M and Roskes, J 
and Sekhar, S and Swartz, M and Abreu, A and Alcerro, LFA and Anguiano, J and Baringer, P and Bean, A and Flowers, Z and Grove, D and King, J and Krintiras, G and Lazarovits, M and Mahieu, CL and Marquez, J and Minafra, N and Murray, M and Nickel, M and Pitt, M and Popescu, S and Rogan, C and Royon, C and Salvatico, R and Sanders, S and Smith, C and Wang, Q and Wilson, G and Allmond, B and Ivanov, A and Kaadze, K and Kalogeropoulos, A and Kim, D and Maravin, Y and Natoli, J and Roy, D and Sorrentino, G and Rebassoo, F and Wright, D and Baden, A and Belloni, A and Chen, YM and Eno, SC and Hadley, NJ and Jabeen, S and Kellogg, RG and Koeth, T and Lai, Y and Lascio, S and Mignerey, AC and Nabili, S and Palmer, C and Papageorgakis, C and Paranjpe, MM and Wang, L and Bendavid, J and Cali, IA and D'Alfonso, M and Eysermans, J and Freer, C and Gomez-Ceballos, G and Goncharov, M and Grosso, G and Harris, P and Hoang, D and Kovalskyi, D and Krupa, J and Lavezzo, L and Lee, YJ and Long, K and Novak, A and Paus, C and Rankin, D and Roland, C and Roland, G and Rothman, S and Stephans, GSF and Wang, Z and Wyslouch, B and Yang, TJ and Crossman, B and Joshi, BM and Kapsiak, C and Krohn, M and Mahon, D and Mans, J and Marzocchi, B and Pandey, S and Revering, M and Rusack, R and Saradhy, R and Schroeder, N and Strobbe, N and Wadud, MA and Cremaldi, LM and Bloom, K and Claes, DR and Haza, G and Hossain, J and Joo, C and Kravchenko, I and Siado, JE and Tabb, W and Vagnerini, A and Wightman, A and Yan, F and Yu, D and Bandyopadhyay, H and Hay, L and Iashvili, I and Kharchilava, A and Morris, M and Nguyen, D and Rappoccio, S and Sfar, HR and Williams, A and Alverson, G and Barberis, E and Dervan, J and Haddad, Y and Han, Y and Krishna, A and Li, J and Lu, M and Madigan, G and Mccarthy, R and Morse, DM and Nguyen, V and Orimoto, T and Parker, A and Skinnari, L and Wang, B and Wood, D and Bhattacharya, S and Bueghly, J and Chen, Z and Dittmer, S and Hahn, KA and Liu, Y and Miao, Y and Monk, DG and Schmitt, MH and Taliercio, A and Velasco, M and Agarwal, G and Band, R and Bucci, R and Castells, S and Das, A and Goldouzian, R and Hildreth, M and Ho, KW and Anampa, KH and Ivanov, T and Jessop, C and Lannon, K and Lawrence, J and Loukas, N and Lutton, L and Mariano, J and Marinelli, N and Mcalister, I and McCauley, T and Mcgrady, C and Moore, C and Musienko, Y and Nelson, H and Osherson, M and Piccinelli, A and Ruchti, R and Townsend, A and Wan, Y and Wayne, M and Yockey, H and Zarucki, M and Zygala, L and Basnet, A and Bylsma, B and Carrigan, M and Durkin, LS and Hill, C and Joyce, M and Ornelas, MN and Wei, K and Winer, BL and Yates, BR and Addesa, FM and Bouchamaoui, H and Das, P and Dezoort, G and Elmer, P and Frankenthal, A and Greenberg, B and Haubrich, N and Kopp, G and Kwan, S and Lange, D and Loeliger, A and Marlow, D and Ojalvo, I and Olsen, J and Shevelev, A and Stickland, D and Tully, C and Malik, S and Bakshi, AS and Barnes, VE and Chandra, S and Chawla, R and Das, S and Gu, A and Gutay, L and Jones, M and Jung, AW and Kondratyev, D and Koshy, AM and Liu, M and Negro, G and Neumeister, N and Paspalaki, G and Piperov, S and Scheurer, V and Schulte, JF and Stojanovic, M and Thieman, J and Virdi, AK and Wang, F and Xie, W and Dolen, J and Parashar, N and Pathak, A and Acosta, D and Carnahan, T and Ecklund, KM and Manteca, PJF and Freed, S and Gardner, P and Geurts, FJM and Li, W and Colin, OM and Padley, BP and Redjimi, R and Rotter, J and Yigitbasi, E and Zhang, Y and Bodek, A and de Barbaro, P and 
Demina, R and Dulemba, JL and Garcia-Bellido, A and Hindrichs, O and Khukhunaishvili, A and Parmar, N and Parygin, P and Popova, E and Taus, R and Goulianos, K and Chiarito, B and Chou, JP and Gershtein, Y and Halkiadakis, E and Heindl, M and Houghton, C and Jaroslawski, D and Karacheban, O and Laflotte, I and Lath, A and Montalvo, R and Nash, K and Routray, H and Salur, S and Schnetzer, S and Somalwar, S and Stone, R and Thayil, SA and Thomas, S and Vora, J and Wang, H and Acharya, H and Ally, D and Delannoy, AG and Fiorendi, S and Higginbotham, S and Holmes, T and Kanuganti, AR and Karunarathna, N and Lee, L and Nibigira, E and Spanier, S and Aebi, D and Ahmad, M and Bouhali, O and Eusebi, R and Gilmore, J and Huang, T and Kamon, T and Kim, H and Luo, S and Mueller, R and Overton, D and Rathjens, D and Safonov, A and Akchurin, N and Damgov, J and Hegde, V and Hussain, A and Kazhykarim, Y and Lamichhane, K and Lee, SW and Mankel, A and Peltola, T and Volobouev, I and Whitbeck, A and Appelt, E and Chen, Y and Greene, S and Gurrola, A and Johns, W and Elayavalli, RK and Melo, A and Romeo, F and Sheldon, P and Tuo, S and Velkovska, J and Viinikainen, J and Cardwell, B and Cox, B and Hakala, J and Hirosky, R and Ledovskoy, A and Neu, C and Lara, CEP and Karchin, PE and Aravind, A and Banerjee, S and Black, K and Bose, T and Dasu, S and De Bruyn, I and Everaerts, P and Galloni, C and He, H and Herndon, M and Herve, A and Koraka, CK and Lanaro, A and Loveless, R and Sreekala, JM and Mallampalli, A and Mohammadi, A and Mondal, S and Parida, G and Pétré, L and Pinna, D and Savin, A and Shang, V and Sharma, V and Smith, WH and Teague, D and Tsoi, HF and Vetens, W and Warden, A and Afanasiev, S and Andreev, V and Andreev, Y and Aushev, T and Azarkin, M and Babaev, A and Belyaev, A and Blinov, V and Boos, E and Borshch, V and Budkouski, D and Chadeeva, M and Chekhovsky, V and Chistov, R and Demiyanov, A and Dermenev, A and Dimova, T and Druzhkin, D and Dubinin, M and Dudko, L and Ershov, A and Gavrilov, G and Gavrilov, V and Gninenko, S and Golovtcov, V and Golubev, N and Golutvin, I and Gorbunov, I and Gribushin, A and Ivanov, Y and Kachanov, V and Karjavine, V and Karneyeu, A and Kim, V and Kirakosyan, M and Kirpichnikov, D and Kirsanov, M and Klyukhin, V and Kodolova, O and Korenkov, V and Kozyrev, A and Krasnikov, N and Lanev, A and Levchenko, P and Lychkovskaya, N and Makarenko, V and Malakhov, A and Matveev, V and Murzin, V and Nikitenko, A and Obraztsov, S and Oreshkin, V and Palichik, V and Perelygin, V and Petrushanko, S and Polikarpov, S and Popov, V and Radchenko, O and Savina, M and Savrin, V and Shalaev, V and Shmatov, S and Shulha, S and Skovpen, Y and Slabospitskii, S and Smirnov, V and Snigirev, A and Sosnov, D and Sulimov, V and Tcherniaev, E and Terkulov, A and Teryaev, O and Tlisova, I and Toropin, A and Uvarov, L and Uzunian, A and Vorobyev, A and Voytishin, N and Yuldashev, BS and Zarubin, A and Zhizhin, I and Zhokin, A}, title = {Portable Acceleration of CMS Computing Workflows with Coprocessors as a Service.}, journal = {Computing and software for big science}, volume = {8}, number = {1}, pages = {17}, pmid = {39248308}, issn = {2510-2044}, abstract = {Computing demands for large scientific experiments, such as the CMS experiment at the CERN LHC, will increase dramatically in the next decades. 
To complement the future performance increases of software running on central processing units (CPUs), explorations of coprocessor usage in data processing hold great potential and interest. Coprocessors are a class of computer processors that supplement CPUs, often improving the execution of certain functions due to architectural design choices. We explore the approach of Services for Optimized Network Inference on Coprocessors (SONIC) and study the deployment of this as-a-service approach in large-scale data processing. In the studies, we take a data processing workflow of the CMS experiment and run the main workflow on CPUs, while offloading several machine learning (ML) inference tasks onto either remote or local coprocessors, specifically graphics processing units (GPUs). With experiments performed at Google Cloud, the Purdue Tier-2 computing center, and combinations of the two, we demonstrate the acceleration of these ML algorithms individually on coprocessors and the corresponding throughput improvement for the entire workflow. This approach can be easily generalized to different types of coprocessors and deployed on local CPUs without decreasing the throughput performance. We emphasize that the SONIC approach enables high coprocessor usage and portability, allowing workflows to run on different types of coprocessors.}, } @article {pmid39242829, year = {2024}, author = {Tanade, C and Khan, NS and Rakestraw, E and Ladd, WD and Draeger, EW and Randles, A}, title = {Establishing the longitudinal hemodynamic mapping framework for wearable-driven coronary digital twins.}, journal = {NPJ digital medicine}, volume = {7}, number = {1}, pages = {236}, pmid = {39242829}, issn = {2398-6352}, support = {DP1AG082343//U.S. Department of Health & Human Services | National Institutes of Health (NIH)/ ; 164486//National Science Foundation (NSF)/ ; DP1AG082343//U.S. Department of Health & Human Services | National Institutes of Health (NIH)/ ; DP1AG082343//U.S. Department of Health & Human Services | National Institutes of Health (NIH)/ ; }, abstract = {Understanding the evolving nature of coronary hemodynamics is crucial for early disease detection and monitoring progression. We require digital twins that mimic a patient's circulatory system by integrating continuous physiological data and computing hemodynamic patterns over months. Current models match clinical flow measurements but are limited to single heartbeats. To this end, we introduced the longitudinal hemodynamic mapping framework (LHMF), designed to tackle critical challenges: (1) computational intractability of explicit methods; (2) boundary conditions reflecting varying activity states; and (3) accessible computing resources for clinical translation. We show negligible error (0.0002-0.004%) between LHMF and explicit data of 750 heartbeats. We deployed LHMF across traditional and cloud-based platforms, demonstrating high-throughput simulations on heterogeneous systems. Additionally, we established LHMFC, where hemodynamically similar heartbeats are clustered to avoid redundant simulations, accurately reconstructing longitudinal hemodynamic maps (LHMs).
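The as-a-service pattern described in the SONIC abstract above can be illustrated with a minimal sketch: the CPU-bound part of a workflow keeps running while ML inference calls are offloaded to a remote coprocessor server. The endpoint URL, request schema, and toy workload below are hypothetical placeholders, not the actual CMS/SONIC interface.

    import concurrent.futures
    import json
    import urllib.request

    # Hypothetical inference endpoint served from a GPU node.
    INFERENCE_URL = "http://gpu-server.example:8000/v2/models/tagger/infer"

    def remote_infer(features):
        """Send one inference request to the (assumed) remote coprocessor service."""
        payload = json.dumps({"inputs": features}).encode()
        req = urllib.request.Request(INFERENCE_URL, data=payload,
                                     headers={"Content-Type": "application/json"})
        with urllib.request.urlopen(req) as resp:
            return json.loads(resp.read())["outputs"]  # assumed response schema

    def cpu_heavy_step(event):
        # Stand-in for the parts of the workflow that stay on local CPUs.
        return sum(event) / len(event)

    events = [[0.1, 0.4, 0.3], [0.9, 0.2, 0.5]]  # toy stand-in for event data
    with concurrent.futures.ThreadPoolExecutor(max_workers=8) as pool:
        futures = [pool.submit(remote_infer, ev) for ev in events]  # offload inference
        local_results = [cpu_heavy_step(ev) for ev in events]       # overlap CPU work
        predictions = [f.result() for f in futures]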
This study captured 3D hemodynamics over 4.5 million heartbeats, paving the way for cardiovascular digital twins.}, } @article {pmid39237930, year = {2024}, author = {Vesselle, H and Chiramal, JA and Hawes, SE and Schulze, E and Nguyen, T and Ndumia, R and Vinayak, S}, title = {Development of an online authentic radiology viewing and reporting platform to test the skills of radiology trainees in Low- and Middle-Income Countries.}, journal = {BMC medical education}, volume = {24}, number = {1}, pages = {969}, pmid = {39237930}, issn = {1472-6920}, mesh = {Humans ; *Radiology/education ; *Developing Countries ; Kenya ; *Internship and Residency ; *Clinical Competence ; *Radiology Information Systems ; Tomography, X-Ray Computed ; }, abstract = {BACKGROUND: Diagnostic radiology residents in low- and middle-income countries (LMICs) may have to provide significant contributions to the clinical workload before the completion of their residency training. Because of time constraints inherent to the delivery of acute care, some of the most clinically impactful diagnostic radiology errors arise from the use of Computed Tomography (CT) in the management of acutely ill patients. As a result, it is paramount to ensure that radiology trainees reach adequate skill levels prior to assuming independent on-call responsibilities. We partnered with the radiology residency program at the Aga Khan University Hospital in Nairobi, Kenya (AKUHN) to evaluate a novel cloud-based testing method that provides an authentic radiology viewing and interpretation environment. It is based on Lifetrack, a unique Google Chrome-based Picture Archiving and Communication System, which enables a complete viewing environment for any scan and provides a novel report generation tool based on Active Templates, a patented structured reporting method. We applied it to evaluate the skills of AKUHN trainees on entire CT scans representing the spectrum of acute non-trauma abdominal pathology encountered in a typical on-call setting. We aimed to demonstrate the feasibility of remotely testing the authentic practice of radiology and to show that important observations can be made from such a Lifetrack-based testing approach regarding the radiology skills of an individual practitioner or of a cohort of trainees.

METHODS: A total of 13 anonymized trainees with experience from 12 months to over 4 years took part in the study. Individually accessing the Lifetrack tool, they were tested on 37 abdominal CT scans (including one normal scan) over six 2-hour sessions on consecutive days. All cases carried the same clinical history of acute abdominal pain. During each session, the trainees accessed the corresponding Lifetrack test set using clinical workstations, reviewed the CT scans, and formulated an opinion for the acute diagnosis, any secondary pathology, and incidental findings on the scan. Their scan interpretations were composed using the Lifetrack report generation system based on active templates, in which segments of text can be selected to assemble a detailed report. All reports generated by the trainees were scored on four different interpretive components: (a) acute diagnosis, (b) unrelated secondary diagnosis, (c) number of missed incidental findings, and (d) number of overcalls. A 3-score aggregate was defined from the first three interpretive elements. A cumulative score modified the 3-score aggregate for the negative effect of interpretive overcalls.
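The scoring scheme above can be made concrete with a small sketch. The abstract does not give the exact formulas, so the equal weighting of the three interpretive components and the 10-point-per-overcall deduction are assumptions; the deduction is chosen because three overcalls against an otherwise zero aggregate would reproduce the cumulative-score floor of -30 reported in the RESULTS.

    def three_score_aggregate(acute, secondary, incidental):
        """Average the applicable 0-1 component scores, scaled to 0-100.

        secondary/incidental are None for cases without such findings."""
        parts = [s for s in (acute, secondary, incidental) if s is not None]
        return 100.0 * sum(parts) / len(parts)

    def cumulative_score(acute, secondary, incidental, n_overcalls,
                         overcall_penalty=10.0):   # penalty size is an assumption
        return (three_score_aggregate(acute, secondary, incidental)
                - overcall_penalty * n_overcalls)

    # Example: perfect acute call, half-credit incidentals, one overcall -> 65.0.
    print(cumulative_score(1.0, None, 0.5, n_overcalls=1))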

RESULTS: A total of 436 scan interpretations and scores were available from 13 trainees tested on 37 cases. The acute diagnosis score ranged from 0 to 1, with a mean of 0.68 ± 0.36 and a median of 0.78 (IQR: 0.5-1). An unrelated secondary diagnosis was present in 11 cases, resulting in 130 secondary diagnosis scores. The unrelated secondary diagnosis score ranged from 0 to 1, with a mean of 0.48 ± 0.46 and a median of 0.5 (IQR: 0-1). There were 32 cases with incidental findings, yielding 390 scores for incidental findings. The number of missed incidental findings ranged from 0 to 5 with a median of 1 (IQR: 1-2). The incidental findings score ranged from 0 to 1 with a mean of 0.4 ± 0.38 and a median of 0.33 (IQR: 0-0.66). The number of overcalls ranged from 0 to 3 with a median of 0 (IQR: 0-1) and a mean of 0.36 ± 0.63. The 3-score aggregate ranged from 0 to 100 with a mean of 65.5 ± 32.5 and a median of 77.3 (IQR: 45.0-92.5). The cumulative score ranged from -30 to 100 with a mean of 61.9 ± 35.5 and a median of 71.4 (IQR: 37.4-92.0). The mean acute diagnosis scores (± SD) by training period were 0.62 ± 0.03, 0.80 ± 0.05, 0.71 ± 0.05, 0.58 ± 0.07, and 0.66 ± 0.05 for trainees with ≤ 12 months, 12-24 months, 24-36 months, 36-48 months, and > 48 months of experience, respectively. The mean acute diagnosis score for the 12-24 months group was the only score significantly greater than that of the ≤ 12 months group (ANOVA with Tukey testing, p = 0.0002). We found a similar trend in the distributions of 3-score aggregates and cumulative scores. There were no significant associations when the training period was categorized as less than versus more than 2 years. Examining the distribution of the 3-score aggregate versus the number of overcalls by trainee, we found that the 3-score aggregate was inversely related to the number of overcalls. Heatmaps and raincloud plots provided an illustrative means to visualize the relative performance of trainees across cases.

CONCLUSION: We demonstrated the feasibility of remotely testing the authentic practice of radiology and showed that important observations can be made from our Lifetrack-based testing approach regarding the radiology skills of an individual or a cohort. From observed weaknesses, areas for targeted teaching can be identified, and retesting could reveal their impact. This methodology can be customized to different LMIC environments and expanded to board certification examinations.}, } @article {pmid39234702, year = {2024}, author = {Holtz, A and Liebe, JD}, title = {Cloud Readiness of German Hospitals: Development and Application of an Evaluation Scale.}, journal = {Studies in health technology and informatics}, volume = {317}, number = {}, pages = {11-19}, doi = {10.3233/SHTI240832}, pmid = {39234702}, issn = {1879-8365}, mesh = {Germany ; *Cloud Computing ; Hospitals ; Computer Security ; Humans ; Surveys and Questionnaires ; }, abstract = {BACKGROUND: In the context of the telematics infrastructure, new data usage regulations, and the growing potential of artificial intelligence, cloud computing plays a key role in driving digitalization in the German hospital sector.

METHODS: Against this background, the study aims to develop and validate a scale for assessing the cloud readiness of German hospitals. It uses the TPOM (Technology, People, Organization, Macro-Environment) framework to create a scoring system. A survey involving 110 Chief Information Officers (CIOs) from German hospitals was conducted, followed by an exploratory factor analysis and reliability testing to refine the items, resulting in a final set of 30 items.
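The refinement step described above, an exploratory factor analysis plus a reliability check, might look roughly like the following sketch; the synthetic responses, the four-factor choice mirroring the TPOM dimensions, and the loading threshold are placeholders rather than the study's actual analysis.

    import numpy as np
    from sklearn.decomposition import FactorAnalysis

    def cronbach_alpha(items):
        """items: (n_respondents, n_items) matrix of survey responses."""
        k = items.shape[1]
        item_var = items.var(axis=0, ddof=1).sum()
        total_var = items.sum(axis=1).var(ddof=1)
        return k / (k - 1) * (1 - item_var / total_var)

    rng = np.random.default_rng(0)
    latent = rng.normal(size=(110, 4))            # 110 CIOs, 4 latent factors
    mixing = rng.normal(scale=0.8, size=(4, 30))  # 30 survey items
    responses = latent @ mixing + rng.normal(scale=0.5, size=(110, 30))

    fa = FactorAnalysis(n_components=4)           # one factor per TPOM dimension
    fa.fit(responses)
    loadings = fa.components_.T                   # (30 items, 4 factors)

    # Drop items that load weakly on every factor (threshold is an assumption).
    keep = np.abs(loadings).max(axis=1) >= 0.3
    print(keep.sum(), round(cronbach_alpha(responses[:, keep]), 2))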

RESULTS: The analysis confirmed the statistical robustness of the scale and identified key factors contributing to cloud readiness. These include IT security in the dimension "technology"; collaborative research and acceptance of the need to make high-quality data available in the dimension "people"; scalability of IT resources in the dimension "organization"; and legal aspects in the dimension "macroenvironment". The macroenvironment dimension emerged as particularly stable, highlighting the critical role of regulatory compliance in the healthcare sector.

CONCLUSION: The findings suggest a certain degree of cloud readiness among German hospitals, with potential for improvement in all four dimensions. Systemically, legal requirements and a challenging political environment are top concerns for CIOs, impacting their cloud readiness.}, } @article {pmid39232132, year = {2024}, author = {Said, G and Ghani, A and Ullah, A and Alzahrani, A and Azeem, M and Ahmad, R and Kim, DH}, title = {Fog-assisted de-duplicated data exchange in distributed edge computing networks.}, journal = {Scientific reports}, volume = {14}, number = {1}, pages = {20595}, pmid = {39232132}, issn = {2045-2322}, abstract = {The Internet of Things (IoT) generates substantial data through sensors for diverse applications, such as healthcare services. This article addresses the challenge of efficiently utilizing resources in resource-scarce IoT-enabled sensors to enhance data collection, transmission, and storage. Redundant data transmission from sensors covering overlapping areas incurs additional communication and storage costs. Existing schemes, namely Asymmetric Extremum (AE) and Rapid Asymmetric Maximum (RAM), employ fixed and variable-sized windows during chunking. However, these schemes face issues while selecting the index value to decide the variable window size, which may remain zero or very low, resulting in poor deduplication. This article resolves this issue with the proposed Controlled Cut-point Identification Algorithm (CCIA), designed to restrict the variable-sized window to a certain threshold. The index value for deciding the threshold will always be larger than half the size of the fixed window. It helps to find more duplicates, but an upper-limit offset is also applied to avoid unnecessarily large windows, which may cause excessive computation costs. Extensive simulations are performed by deploying Windows Communication Foundation services in the Azure cloud. The results demonstrate the superiority of CCIA in various metrics, including chunk number, average chunk size, minimum and maximum chunk number, variable chunking size, and probability of failure for cut-point identification. In comparison to its competitors, RAM and AE, CCIA exhibits better performance across key parameters. Specifically, CCIA outperforms in total number of chunks (6.81%, 14.17%), average number of chunks (4.39%, 18.45%), and minimum chunk size (153%, 190%). These results highlight the effectiveness of CCIA in optimizing data transmission and storage within IoT systems, showcasing its potential for improved resource utilization and reduced operational costs.}, } @article {pmid39232070, year = {2024}, author = {Jang, H and Koh, H}, title = {A unified web cloud computing platform MiMedSurv for microbiome causal mediation analysis with survival responses.}, journal = {Scientific reports}, volume = {14}, number = {1}, pages = {20650}, pmid = {39232070}, issn = {2045-2322}, support = {2021R1C1C1013861//National Research Foundation of Korea/ ; }, mesh = {Humans ; *Microbiota ; *Cloud Computing ; *Internet ; Software ; Survival Analysis ; }, abstract = {In human microbiome studies, mediation analysis has recently been spotlighted as a practical and powerful analytic tool to survey the causal roles of the microbiome as a mediator to explain the observed relationships between a medical treatment/environmental exposure and a human disease.
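A much-simplified sketch of the window-bounding idea described for CCIA above: a content-defined chunker whose cut-point search is bounded below by half the fixed window size and above by an upper-limit offset, so chunks can be neither degenerate nor unboundedly large. The cut-point rule itself is a simplification for illustration, not the authors' algorithm.

    def chunk_stream(data: bytes, fixed_window: int = 256, upper_offset: int = 1024):
        min_cut = fixed_window // 2          # lower bound: half the fixed window
        chunks, start = [], 0
        while start < len(data):
            end = min(start + upper_offset, len(data))   # upper-limit offset
            cut = end
            max_byte = -1
            # Simplified rule: cut at the first byte past the lower bound that is
            # strictly greater than every earlier byte in the current chunk.
            for i in range(start, end):
                if data[i] > max_byte:
                    max_byte = data[i]
                    if i - start >= min_cut:
                        cut = i + 1
                        break
            chunks.append(data[start:cut])
            start = cut
        return chunks

    blob = bytes(range(256)) * 16
    sizes = [len(c) for c in chunk_stream(blob)]
    print(len(sizes), min(sizes), max(sizes))   # chunk sizes stay within the bounds

Duplicate chunks would then be detected by hashing each chunk (e.g., SHA-256) and transmitting only unseen digests.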
We also note that, in clinical research, investigators often trace disease progression sequentially in time; as such, time-to-event (e.g., time-to-disease, time-to-cure) responses, known as survival responses, are prevalent as a surrogate variable for human health or disease. In this paper, we introduce a web cloud computing platform, named microbiome mediation analysis with survival responses (MiMedSurv), for comprehensive microbiome mediation analysis with survival responses on user-friendly web environments. MiMedSurv is an extension of our prior web cloud computing platform, named microbiome mediation analysis (MiMed), for survival responses. Its two main distinguishing features are as follows. First, MiMedSurv conducts some baseline exploratory non-mediational survival analysis, not involving the microbiome, to survey the disparity in survival response between medical treatments/environmental exposures. Then, MiMedSurv identifies the mediating roles of the microbiome in various aspects: (i) as a microbial ecosystem using ecological indices (e.g., alpha and beta diversity indices) and (ii) as individual microbial taxa in various hierarchies (e.g., phyla, classes, orders, families, genera, species). To illustrate its use, we survey the mediating roles of the gut microbiome between antibiotic treatment and time-to-type 1 diabetes. MiMedSurv is freely available on our web server (http://mimedsurv.micloud.kr).}, } @article {pmid39220162, year = {2024}, author = {Blazhynska, M and Lagardère, L and Liu, C and Adjoua, O and Ren, P and Piquemal, JP}, title = {Water-glycan interactions drive the SARS-CoV-2 spike dynamics: insights into glycan-gate control and camouflage mechanisms.}, journal = {Chemical science}, volume = {15}, number = {35}, pages = {14177-14187}, pmid = {39220162}, issn = {2041-6520}, abstract = {To develop therapeutic strategies against COVID-19, we introduce a high-resolution all-atom polarizable model capturing many-body effects of protein, glycan, solvent, and membrane components in SARS-CoV-2 spike protein open and closed states. Employing μs-long molecular dynamics simulations powered by high-performance cloud-computing and unsupervised density-driven adaptive sampling, we investigated the differences in bulk-solvent-glycan and protein-solvent-glycan interfaces between these states. We unraveled a sophisticated solvent-glycan polarization interaction network involving the N165/N343 glycan-gate patterns that provide structural support for the open state and identified key water molecules that could potentially be targeted to destabilize this configuration. In the closed state, the reduced solvent polarization diminishes the overall N165/N343 dipoles, yet internal interactions and a reorganized sugar coat stabilize this state. Despite variations, our glycan-solvent accessibility analysis reveals the glycan shield's capability to maintain constant interactions with the solvent, effectively camouflaging the virus from immune detection in both states.
The presented insights advance our comprehension of viral pathogenesis at an atomic level, offering potential avenues to combat COVID-19.}, } @article {pmid39217900, year = {2024}, author = {Cian, F and Delgado Blasco, JM and Ivanescu, C}, title = {Improving rapid flood impact assessment: An enhanced multi-sensor approach including a new flood mapping method based on Sentinel-2 data.}, journal = {Journal of environmental management}, volume = {369}, number = {}, pages = {122326}, doi = {10.1016/j.jenvman.2024.122326}, pmid = {39217900}, issn = {1095-8630}, mesh = {*Floods ; Satellite Imagery ; Risk Management/methods ; }, abstract = {Rapid flood impact assessment methods need complete and accurate flood maps to provide reliable information for disaster risk management, in particular for emergency response and recovery and reconstruction plans. With the aim of improving the rapid assessment of flood impacts, this work presents a new impact assessment method characterized by an enhanced satellite multi-sensor approach for flood mapping, which improves the characterization of the hazard. This includes a novel flood mapping method based on the new multi-temporal Modified Normalized Difference Water Index (MNDWI) that uses multi-temporal statistics computed on time-series of Sentinel-2 multi-spectral satellite images. The multi-temporal aspect of the MNDWI improves characterization of land cover over time and enhances the temporary flooded areas, which can be extracted through a thresholding technique, allowing the delineation of more precise and complete flood maps. The methodology, if implemented in cloud-based environments such as Google Earth Engine (GEE), is computationally light and robust, allowing the derivation of flood maps in a matter of minutes, even for large areas. The flood mapping and impact assessment method has been applied to the seasonal flood that occurred in South Sudan in 2020, using Sentinel-1, Sentinel-2 and PlanetScope satellite imagery. Flood impacts were assessed considering damages to buildings, roads, and cropland. The multi-sensor approach estimated an impact of 57.4 million USD (considering a middle-bound scenario), higher than the estimates obtained using Sentinel-1 data only and Sentinel-2 data only (respectively 24% and 78% of the multi-sensor estimate). This work highlights the effectiveness and importance of considering multi-source satellite data for flood mapping in a context of disaster risk management, to better inform disaster response, recovery and reconstruction plans.}, } @article {pmid39205138, year = {2024}, author = {Kontogiannis, S}, title = {Beehive Smart Detector Device for the Detection of Critical Conditions That Utilize Edge Device Computations and Deep Learning Inferences.}, journal = {Sensors (Basel, Switzerland)}, volume = {24}, number = {16}, pages = {}, pmid = {39205138}, issn = {1424-8220}, abstract = {This paper presents a new edge detection process implemented in an embedded IoT device called Bee Smart Detection node to detect catastrophic apiary events. Such events include swarming, queen loss, and the detection of Colony Collapse Disorder (CCD) conditions. Two deep learning sub-processes are used for this purpose. The first uses a fuzzy multi-layered neural network of variable depths called fuzzy-stranded-NN to detect CCD conditions based on temperature and humidity measurements inside the beehive. The second utilizes a deep learning CNN model to detect swarming and queen loss cases based on sound recordings.
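The multi-temporal MNDWI thresholding described in the flood-mapping abstract above can be sketched with the Earth Engine Python API. The dataset ID and band names follow the public Sentinel-2 catalog; the region, dates, statistics, and threshold are placeholders, not the paper's calibrated procedure.

    import ee

    ee.Initialize()
    region = ee.Geometry.Rectangle([30.0, 6.5, 31.0, 7.5])  # placeholder AOI

    def add_mndwi(img):
        # MNDWI = (green - SWIR1) / (green + SWIR1); bands B3/B11 on Sentinel-2.
        return img.normalizedDifference(["B3", "B11"]).rename("MNDWI")

    mndwi_series = (ee.ImageCollection("COPERNICUS/S2_SR_HARMONIZED")
                    .filterBounds(region)
                    .filterDate("2020-01-01", "2020-12-31")
                    .map(add_mndwi))

    baseline = mndwi_series.median()   # multi-temporal statistic: usual wetness per pixel
    latest = ee.Image(mndwi_series.sort("system:time_start", False).first())

    # Pixels much wetter than their multi-temporal baseline -> candidate flooded areas.
    flood_mask = latest.subtract(baseline).gt(0.3)   # threshold is an assumption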
The proposed processes have been implemented into autonomous Bee Smart Detection IoT devices that transmit their measurements and the detection results to the cloud over Wi-Fi. The Bee Smart Detection (BeeSD) devices have been tested for easy-to-use functionality, autonomous operation, deep learning model inference accuracy, and inference execution speeds. The author presents the experimental results of the fuzzy-stranded-NN model for detecting critical conditions and deep learning CNN models for detecting swarming and queen loss. From the presented experimental results, the stranded-NN achieved accuracy of up to 95%, while the ResNet-50 model achieved accuracy of up to 99% for detecting swarming or queen loss events. The ResNet-18 model is also the fastest-inference replacement for the ResNet-50 model, achieving accuracy of up to 93%. Finally, cross-comparison of the deep learning models with machine learning ones shows that deep learning models can provide at least 3-5% better accuracy.}, } @article {pmid39205076, year = {2024}, author = {Celik, AE and Rodriguez, I and Ayestaran, RG and Yavuz, SC}, title = {Decentralized System Synchronization among Collaborative Robots via 5G Technology.}, journal = {Sensors (Basel, Switzerland)}, volume = {24}, number = {16}, pages = {}, pmid = {39205076}, issn = {1424-8220}, support = {RYC-2020-030676-I//Ministerio de Ciencia, Innovación y Universidades/ ; }, abstract = {In this article, we propose a distributed synchronization solution to achieve decentralized coordination in a system of collaborative robots. This is done by leveraging cloud-based computing and 5G technology to exchange causal ordering messages between the robots, eliminating the need for centralized control entities or programmable logic controllers in the system. The proposed solution is described, mathematically formulated, implemented in software, and validated over realistic network conditions. Further, the performance of the decentralized solution via 5G technology is compared to that achieved with traditional coordinated/uncoordinated cabled control systems. The results indicate that the proposed decentralized solution leveraging cloud-based 5G wireless is scalable to systems of up to 10 collaborative robots with comparable efficiency to that from standard cabled systems. The proposed solution has direct application in the control of producer-consumer and automated assembly line robotic applications.}, } @article {pmid39205014, year = {2024}, author = {Dauda, A and Flauzac, O and Nolot, F}, title = {A Survey on IoT Application Architectures.}, journal = {Sensors (Basel, Switzerland)}, volume = {24}, number = {16}, pages = {}, pmid = {39205014}, issn = {1424-8220}, support = {1711/20//Petroleum Technology Development Fund (PTDF) Nigeria/ ; }, abstract = {The proliferation of the IoT has led to the development of diverse application architectures to optimize IoT systems' deployment, operation, and maintenance. This survey provides a comprehensive overview of the existing IoT application architectures, highlighting their key features, strengths, and limitations. The architectures are categorized based on their deployment models, such as cloud, edge, and fog computing approaches, each offering distinct advantages regarding scalability, latency, and resource efficiency. Cloud architectures leverage centralized data processing and storage capabilities to support large-scale IoT applications but often suffer from high latency and bandwidth constraints.
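The causal-ordering message exchange underlying the decentralized robot synchronization above is classically implemented with vector clocks: each robot stamps outgoing messages with its clock, and a receiver buffers a message until every causally preceding message has been delivered. A toy sketch follows; robot IDs, the transport, and the delivery rule are simplified, and this is not the paper's protocol.

    class Robot:
        def __init__(self, rid, n):
            self.rid = rid
            self.clock = [0] * n          # vector clock over n robots
            self.pending = []             # messages buffered for causal delivery

        def send(self, payload):
            self.clock[self.rid] += 1
            return (self.rid, list(self.clock), payload)

        def _deliverable(self, sender, stamp):
            # Next message from that sender, and nothing newer from anyone else.
            return all(stamp[k] == self.clock[k] + 1 if k == sender
                       else stamp[k] <= self.clock[k]
                       for k in range(len(stamp)))

        def receive(self, msg):
            self.pending.append(msg)
            progressed = True
            while progressed:             # drain everything now deliverable
                progressed = False
                for m in list(self.pending):
                    sender, stamp, payload = m
                    if self._deliverable(sender, stamp):
                        self.clock = [max(a, b) for a, b in zip(self.clock, stamp)]
                        self.pending.remove(m)
                        progressed = True

    r0, r1 = Robot(0, 2), Robot(1, 2)
    m1 = r0.send("grip")                  # causally precedes m2
    m2 = r0.send("release")
    r1.receive(m2)                        # arrives out of order: buffered
    r1.receive(m1)                        # both now deliver, in causal order
    print(r1.clock)                       # -> [2, 0]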
Edge architectures mitigate these issues by bringing computation closer to the data source, enhancing real-time processing, and reducing network congestion. Fog architectures combine the strengths of both cloud and edge paradigms, offering a balanced solution for complex IoT environments. This survey also examines emerging trends and technologies in IoT application management, such as the solutions provided by the major IoT service providers like Intel, AWS, Microsoft Azure, and GCP. The survey identifies latency, privacy, and deployment difficulties as key areas for future research. It highlights the need to advance IoT Edge architectures to reduce network traffic, improve data privacy, and enhance interoperability by developing multi-application and multi-protocol edge gateways for efficient IoT application management.}, } @article {pmid39205003, year = {2024}, author = {Rigas, S and Tzouveli, P and Kollias, S}, title = {An End-to-End Deep Learning Framework for Fault Detection in Marine Machinery.}, journal = {Sensors (Basel, Switzerland)}, volume = {24}, number = {16}, pages = {}, pmid = {39205003}, issn = {1424-8220}, support = {ATHINAIKI RIVIERA - ATTP4-0325990//Greece and European Union: Attica 2014-2020/ ; }, abstract = {The Industrial Internet of Things has enabled the integration and analysis of vast volumes of data across various industries, with the maritime sector being no exception. Advances in cloud computing and deep learning (DL) are continuously reshaping the industry, particularly in optimizing maritime operations such as Predictive Maintenance (PdM). In this study, we propose a novel DL-based framework focusing on the fault detection task of PdM in marine operations, leveraging time-series data from sensors installed on shipboard machinery. The framework is designed as a scalable and cost-efficient software solution, encompassing all stages from data collection and pre-processing at the edge to the deployment and lifecycle management of DL models. The proposed DL architecture utilizes Graph Attention Networks (GATs) to extract spatio-temporal information from the time-series data and provides explainable predictions through a feature-wise scoring mechanism. Additionally, a custom evaluation metric with real-world applicability is employed, prioritizing both prediction accuracy and the timeliness of fault identification. To demonstrate the effectiveness of our framework, we conduct experiments on three types of open-source datasets relevant to PdM: electrical data, bearing datasets, and data from water circulation experiments.}, } @article {pmid39204979, year = {2024}, author = {Adame, T and Amri, E and Antonopoulos, G and Azaiez, S and Berne, A and Camargo, JS and Kakoulidis, H and Kleisarchaki, S and Llamedo, A and Prasinos, M and Psara, K and Shumaiev, K}, title = {Presenting the COGNIFOG Framework: Architecture, Building Blocks and Road toward Cognitive Connectivity.}, journal = {Sensors (Basel, Switzerland)}, volume = {24}, number = {16}, pages = {}, pmid = {39204979}, issn = {1424-8220}, support = {101092968//European Union/ ; }, abstract = {In the era of ubiquitous computing, the challenges imposed by the increasing demand for real-time data processing, security, and energy efficiency call for innovative solutions. The emergence of fog computing has provided a promising paradigm to address these challenges by bringing computational resources closer to data sources. 
Despite its advantages, the characteristics of fog computing pose challenges in heterogeneous environments in terms of resource allocation and management, provisioning, security, and connectivity, among others. This paper introduces COGNIFOG, a novel cognitive fog framework currently under development, designed to leverage intelligent, decentralized decision-making processes, machine learning algorithms, and distributed computing principles to enable autonomous operation, adaptability, and scalability across the IoT-edge-cloud continuum. By integrating cognitive capabilities, COGNIFOG is expected to increase the efficiency and reliability of next-generation computing environments, potentially providing a seamless bridge between the physical and digital worlds. Preliminary experimental results with a limited set of connectivity-related COGNIFOG building blocks show promising improvements in network resource utilization in a real-world-based IoT scenario. Overall, this work paves the way for further developments on the framework, aimed at making it more intelligent, resilient, and aligned with the ever-evolving demands of next-generation computing environments.}, } @article {pmid39204967, year = {2024}, author = {Krishnamurthy, B and Shiva, SG}, title = {Integral-Valued Pythagorean Fuzzy-Set-Based Dyna Q+ Framework for Task Scheduling in Cloud Computing.}, journal = {Sensors (Basel, Switzerland)}, volume = {24}, number = {16}, pages = {}, pmid = {39204967}, issn = {1424-8220}, abstract = {Task scheduling is a critical challenge in cloud computing systems, greatly impacting their performance. Task scheduling is a nondeterministic polynomial time hard (NP-Hard) problem that complicates the search for nearly optimal solutions. Five major uncertainty parameters, i.e., security, traffic, workload, availability, and price, influence task scheduling decisions. The primary rationale for selecting these uncertainty parameters lies in the challenge of accurately measuring their values, as empirical estimations often diverge from the actual values. The integral-valued Pythagorean fuzzy set (IVPFS) is a promising mathematical framework to deal with parametric uncertainties. The Dyna Q+ algorithm is an updated form of the Dyna Q agent, designed specifically for dynamic computing environments by providing bonus rewards to non-exploited states. In this paper, the Dyna Q+ agent is enriched with the IVPFS mathematical framework to make intelligent task scheduling decisions. The performance of the proposed IVPFS Dyna Q+ task scheduler is tested using the CloudSim 3.3 simulator. The execution time is reduced by 90%, the makespan time is also reduced by 90%, the operation cost is below 50%, and the resource utilization rate is improved by 95%, with all of these parameters meeting the desired standards or expectations. The results are also further validated using an expected value analysis methodology that confirms the good performance of the task scheduler. 
A better balance between exploration and exploitation through rigorous action-based learning is achieved by the Dyna Q+ agent.}, } @article {pmid39190508, year = {2024}, author = {Wang, J and Lu, X and Wang, M and Hou, F and He, Y}, title = {Learning Implicit Fields for Point Cloud Filtering.}, journal = {IEEE transactions on visualization and computer graphics}, volume = {PP}, number = {}, pages = {}, doi = {10.1109/TVCG.2024.3450699}, pmid = {39190508}, issn = {1941-0506}, abstract = {Since point clouds acquired by scanners inevitably contain noise, recovering a clean version from a noisy point cloud is essential for further 3D geometry processing applications. Several data-driven approaches have been recently introduced to overcome the drawbacks of traditional filtering algorithms, such as less robust preservation of sharp features and tedious tuning for multiple parameters. Most of these methods achieve filtering by directly regressing the position/displacement of each point, which may blur detailed features and is prone to uneven distribution. In this paper, we propose a novel data-driven method that explores the implicit fields. Our assumption is that the given noisy points implicitly define a surface, and we attempt to obtain a point's movement direction and distance separately based on the predicted signed distance fields (SDFs). Taking a noisy point cloud as input, we first obtain a consistent alignment by incorporating the global points into local patches. We then feed them into an encoder-decoder structure and predict a 7D vector consisting of SDFs. Subsequently, the distance can be obtained directly from the first element in the vector, and the movement direction can be obtained by computing the gradient descent from the last six elements (i.e., six surrounding SDFs). We finally obtain the filtered results by moving each point with its predicted distance along its movement direction. Our method can produce feature-preserving results without requiring explicit normals. Experiments demonstrate that our method visually outperforms state-of-the-art methods and generally produces better quantitative results than position-based methods (both learning and non-learning).}, } @article {pmid39189264, year = {2024}, author = {Radu, MC and Armean, MS and Pop-Tudose, M and Medar, C and Manolescu, LSC}, title = {Exploring Factors Influencing Pregnant Women's Perceptions and Attitudes Towards Midwifery Care in Romania: Implications for Maternal Health Education Strategies.}, journal = {Nursing reports (Pavia, Italy)}, volume = {14}, number = {3}, pages = {1807-1818}, pmid = {39189264}, issn = {2039-4403}, abstract = {BACKGROUND: Midwives are strong advocates for vaginal births. However, their visibility and accessibility are poorly perceived by women in Romania. Consequently, the women's options are limited to a single direction when pregnancy occurs, involving the family doctor, the obstetrician, and often an interventional technical approach at the time of birth. The aim of this research is to identify specific variables that affect the perceptions and attitudes of pregnant women towards the care provided by midwives. This knowledge could contribute to the development of more effective education and information strategies within maternal health services.
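The point cloud filtering entry above (Wang et al., pmid 39190508) moves each noisy point by a predicted distance along a direction derived from six surrounding signed distance field (SDF) samples. A minimal NumPy sketch of that movement step follows; predict_sdf7 is a hypothetical stand-in for their encoder-decoder network, and eps is an assumed sampling offset.

import numpy as np

def filter_points(points: np.ndarray, predict_sdf7, eps: float = 1e-2) -> np.ndarray:
    # points: (N, 3); predict_sdf7 returns (N, 7): [distance, sdf at +/-eps along x, y, z]
    v = predict_sdf7(points)
    distance = v[:, 0:1]  # predicted distance of each point from the implicit surface
    # Central-difference gradient from the six surrounding SDF samples
    grad = np.stack([v[:, 1] - v[:, 2],
                     v[:, 3] - v[:, 4],
                     v[:, 5] - v[:, 6]], axis=1) / (2.0 * eps)
    direction = grad / (np.linalg.norm(grad, axis=1, keepdims=True) + 1e-12)
    # Move each point against the gradient by its predicted distance
    return points - direction * distance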

METHODS: A cross-sectional observational analytical survey was conducted in Romania among pregnant women from the general population. Data were collected through a self-administered questionnaire, with informed consent obtained from each participating pregnant woman. The questionnaire was administered online using the cloud-based Google Forms platform and was available on the internet for seven months, from January to July 2023. The questionnaire was distributed through various media channels, both individually and in communication groups, in the form of a link. All questions were mandatory, and the questionnaire could only be submitted after answering all questions.

RESULTS: A total of 1301 individual responses were collected. The analysis of the socio-demographic and obstetrical profile of the pregnant women revealed that approximately half, 689 (52.95%), of the participants were aged between 18 and 29 years, and 1060 (81.47%) of the participants were married. Among our group of 1301 pregnant women, 973 (74.78%) had higher education, and 987 (75.86%) had a regular job. A majority of the survey participants, 936 (71.94%), lived in an urban geographic area, while 476 (36.58%) had attended childbirth education courses, and 791 (60.79%) were in the third trimester of pregnancy. A total of 298 (22.9%) respondents did not want to give birth in a hospital, and about a quarter, 347 (26.67%), did not place significant importance on control over the childbirth process.

CONCLUSIONS: The main factors influencing women's decisions regarding perinatal care and the importance of midwives as a component of the maternal-infant care team are modifiable, and thorough educational and psychological preparation would reduce the increasing predominance of preference for cesarean section, thereby promoting healthier and more woman- and child-centered perinatal care.}, } @article {pmid39187555, year = {2024}, author = {Farooq, O and Shahid, M and Arshad, S and Altaf, A and Iqbal, F and Vera, YAM and Flores, MAL and Ashraf, I}, title = {An enhanced approach for predicting air pollution using quantum support vector machine.}, journal = {Scientific reports}, volume = {14}, number = {1}, pages = {19521}, pmid = {39187555}, issn = {2045-2322}, abstract = {The essence of quantum machine learning is to optimize problem-solving by executing machine learning algorithms on quantum computers and exploiting potent laws such as superposition and entanglement. Support vector machine (SVM) is widely recognized as one of the most effective classification machine learning techniques currently available. In conventional systems, however, the SVM kernel technique tends to slow down and even fail as datasets become increasingly complex or jumbled. To compare the execution time and accuracy of conventional SVM classification to that of quantum SVM classification, the appropriate quantum features for mapping need to be selected. As datasets grow more complex, selecting an appropriate feature map that matches or outperforms classical classification becomes increasingly important. This paper utilizes conventional SVM to select an optimal feature map and benchmark dataset for predicting air quality. Experimental evidence demonstrates that the precision of quantum SVM surpasses that of classical SVM for air quality assessment. Using quantum labs from IBM's quantum computer cloud, conventional and quantum computing have been compared. When applied to the same datasets, the conventional SVM achieved accuracies of 91% and 87%, whereas the quantum SVM achieved accuracies of 97% and 94%, respectively, for air quality prediction. The study introduces the use of quantum Support Vector Machines (SVM) for predicting air quality. It emphasizes the novel method of choosing the best quantum feature maps. Through the utilization of quantum-enhanced feature mapping, our objective is to exceed the constraints of classical SVM and achieve unparalleled levels of precision and effectiveness. We conduct precise experiments utilizing IBM's state-of-the-art quantum computer cloud to compare the performance of conventional and quantum SVM algorithms on a shared dataset.}, } @article {pmid39184128, year = {2024}, author = {Nagi, SC and Ashraf, F and Miles, A and Donnelly, MJ}, title = {AnoPrimer: Primer Design in malaria vectors informed by range-wide genomic variation.}, journal = {Wellcome open research}, volume = {9}, number = {}, pages = {255}, pmid = {39184128}, issn = {2398-502X}, support = {/WT_/Wellcome Trust/United Kingdom ; R01 AI116811/AI/NIAID NIH HHS/United States ; }, abstract = {The major malaria mosquitoes, Anopheles gambiae s.l and Anopheles funestus, are some of the most studied organisms in medical research and also some of the most genetically diverse. When designing polymerase chain reaction (PCR) or hybridisation-based molecular assays, reliable primer and probe design is crucial.
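The classical-SVM benchmarking step that Farooq et al. (pmid 39187555) describe as preceding quantum feature-map selection can be sketched with scikit-learn; the synthetic data, feature count, and kernel list below are illustrative assumptions, not the paper's dataset or protocol.

import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVC
from sklearn.metrics import accuracy_score

rng = np.random.default_rng(0)
X = rng.normal(size=(500, 4))                      # stand-in for air-quality features
y = (X[:, 0] * X[:, 1] + X[:, 2] > 0).astype(int)  # synthetic pollution label

X_tr, X_te, y_tr, y_te = train_test_split(X, y, test_size=0.3, random_state=0)
scaler = StandardScaler().fit(X_tr)

# Benchmark conventional kernels; the best classical kernel then guides which
# quantum feature map is worth evaluating on quantum hardware.
for kernel in ('linear', 'rbf', 'poly'):
    clf = SVC(kernel=kernel).fit(scaler.transform(X_tr), y_tr)
    acc = accuracy_score(y_te, clf.predict(scaler.transform(X_te)))
    print(f'{kernel}: {acc:.3f}')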
However, single nucleotide polymorphisms (SNPs) in primer binding sites can prevent primer binding, leading to null alleles, or cause suboptimal binding, leading to preferential amplification of specific alleles. Given the extreme genetic diversity of Anopheles mosquitoes, researchers need to consider this genetic variation when designing primers and probes to avoid amplification problems. In this note, we present a Python package, AnoPrimer, which exploits the Ag1000G and Af1000 datasets and allows users to rapidly design primers in An. gambiae or An. funestus, whilst summarising genetic variation in the primer binding sites and visualising the position of primer pairs. AnoPrimer allows the design of both genomic DNA and cDNA primers and hybridisation probes. By coupling this Python package with Google Colaboratory, AnoPrimer is an open and accessible platform for primer and probe design, hosted in the cloud for free. AnoPrimer is available here https://github.com/sanjaynagi/AnoPrimer and we hope it will be a useful resource for the community to design probe and primer sets that can be reliably deployed across the An. gambiae and funestus species ranges.}, } @article {pmid39183823, year = {2024}, author = {Chen, M and Qi, P and Chu, Y and Wang, B and Wang, F and Cao, J}, title = {Genetic algorithm with skew mutation for heterogeneous resource-aware task offloading in edge-cloud computing.}, journal = {Heliyon}, volume = {10}, number = {12}, pages = {e32399}, pmid = {39183823}, issn = {2405-8440}, abstract = {In recent years, edge-cloud computing has attracted increasing attention due to the benefits of combining edge and cloud computing. Task scheduling is still one of the major challenges for improving service quality and resource efficiency of edge-clouds. Though several works have studied the scheduling problem, issues remain to be addressed for practical application, e.g., ignoring resource heterogeneity or focusing on only one kind of request. Therefore, in this paper, we aim at providing a heterogeneity-aware task scheduling algorithm to improve task completion rate and resource utilization for edge-clouds with deadline constraints. Due to the NP-hardness of the scheduling problem, we exploit genetic algorithm (GA), one of the most representative and widely used meta-heuristic algorithms, to solve the problem, considering task completion rate and resource utilization as the major and minor optimization objectives, respectively. In our GA-based scheduling algorithm, a gene indicates which resource its corresponding task is processed by. To improve the performance of GA, we propose to exploit a skew mutation operator where genes are associated with resource heterogeneity during the population evolution. 
We conduct extensive experiments to evaluate the performance of our algorithm, and the results verify its superiority in task completion rate compared with thirteen other classical and state-of-the-art scheduling algorithms.}, } @article {pmid39179595, year = {2024}, author = {Huang, Y and Lu, Y and Li, W and Xu, X and Jiang, X and Ma, R and Chen, L and Ruan, N and Wu, Q and Xu, J}, title = {Giant Kerr nonlinearity of terahertz waves mediated by stimulated phonon polaritons in a microcavity chip.}, journal = {Light, science & applications}, volume = {13}, number = {1}, pages = {212}, pmid = {39179595}, issn = {2047-7538}, support = {11974192//National Natural Science Foundation of China (National Science Foundation of China)/ ; 62205158//National Natural Science Foundation of China (National Science Foundation of China)/ ; }, abstract = {Optical Kerr effect, in which input light intensity linearly alters the refractive index, has enabled the generation of optical solitons, supercontinuum spectra, and frequency combs, playing vital roles in on-chip devices, fiber communications, and quantum manipulations. In particular, the terahertz Kerr effect, featuring fascinating prospects in future high-rate computing, artificial intelligence, and cloud-based technologies, encounters a great challenge due to the rather low power density and feeble Kerr response. Here, we demonstrate a giant terahertz frequency Kerr nonlinearity mediated by stimulated phonon polaritons. Under the influences of the giant Kerr nonlinearity, the power-dependent refractive index change would result in a frequency shift in the microcavity, which was experimentally demonstrated via the measurement of the resonant mode of a chip-scale lithium niobate Fabry-Pérot microcavity. Attributed to the existence of stimulated phonon polaritons, the nonlinear coefficient extracted from the frequency shifts is orders of magnitude larger than that of visible and infrared light, which is also theoretically demonstrated by nonlinear Huang equations. This work opens an avenue for many rich and fruitful terahertz Kerr effect based physical, chemical, and biological systems that have terahertz fingerprints.}, } @article {pmid39176842, year = {2024}, author = {Mammas, CS and Mamma, AS}, title = {Remote Monitoring, AI, Machine Learning and Mobile Ultrasound Integration upon 5G Internet in the Prehospital Care to Support the Golden Hour Principle and Optimize Outcomes in Severe Trauma and Emergency Surgery.}, journal = {Studies in health technology and informatics}, volume = {316}, number = {}, pages = {1807-1811}, doi = {10.3233/SHTI240782}, pmid = {39176842}, issn = {1879-8365}, mesh = {Humans ; *Machine Learning ; *Ultrasonography ; *Emergency Medical Services ; *Wounds and Injuries/diagnostic imaging/therapy ; Telemedicine ; Artificial Intelligence ; Internet ; Feasibility Studies ; Reproducibility of Results ; }, abstract = {AIM: Feasibility and reliability evaluation of 5G internet networks (5G IN) upon Artificial Intelligence (AI)/Machine Learning (ML), of telemonitoring and mobile ultrasound (m u/s) in an ambulance car (AC), integrated in the pre-hospital setting (PS), to support the Golden Hour Principle (GHP) and optimize outcomes in severe trauma (TRS).
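A minimal sketch of a heterogeneity-aware GA with a skew mutation, in the spirit of Chen et al. (pmid 39183823) above: each gene assigns a task to a resource, and mutated genes are skewed toward faster resources rather than drawn uniformly. The speeds, task lengths, rates, and makespan-based fitness are illustrative assumptions, not the paper's exact formulation.

import random

SPEEDS = [1.0, 2.0, 4.0]           # heterogeneous resource speeds (hypothetical)
TASKS = [3.0, 1.0, 4.0, 2.0, 5.0]  # task lengths (hypothetical)

def fitness(chrom):
    # Minor objective folded into one score here: minimize makespan
    load = [0.0] * len(SPEEDS)
    for length, res in zip(TASKS, chrom):
        load[res] += length / SPEEDS[res]
    return -max(load)

def skew_mutate(chrom, rate=0.2):
    # Skew: mutated genes prefer faster resources instead of a uniform choice
    weights = [s / sum(SPEEDS) for s in SPEEDS]
    return [random.choices(range(len(SPEEDS)), weights)[0]
            if random.random() < rate else g
            for g in chrom]

pop = [[random.randrange(len(SPEEDS)) for _ in TASKS] for _ in range(30)]
for _ in range(100):
    pop.sort(key=fitness, reverse=True)     # best chromosomes first
    parents, children = pop[:10], []
    for _ in range(20):
        a, b = random.sample(parents, 2)
        cut = random.randrange(1, len(TASKS))
        children.append(skew_mutate(a[:cut] + b[cut:]))  # one-point crossover
    pop = parents + children

best = max(pop, key=fitness)
print('assignment:', best, 'negative makespan:', fitness(best))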

MATERIAL AND METHODS: (PS) organization and care upon (5G IN) high bandwidths (10 GB/s) mobile tele-communication (mTC) experimentation by using the experimental Cobot PROMETHEUS III, pn:100016 by simulation upon six severe trauma clinical cases by ten (N1=10) experts: Four professional rescuers (n1=4), three trauma surgeons (n2=3), a radiologist (n3=1) and two information technology specialists (n4=2) to evaluate feasibility, reliability and clinical usability for instant risk, prognosis and triage computation, decision support and treatment planning by (AI)/(ML) computations in (PS) of (TRS) as well as by performing (PS) (m u/s).

RESULTS: A. Instant computation of trauma severity scales by the Cobot PROMETHEUS III (pn 100016), based on complex AI and ML algorithms, cloud computing, and telemonitoring, showed very high feasibility and reliability upon (5G IN) under specific technological, training, and ergonomic prerequisites. B. Measured bi-directional (m u/s) image data sharing between (AC) and (ED/TC) showed very high feasibility and reliability upon (5G IN) under specific technological and ergonomic conditions in (TRS).

CONCLUSION: Integration of (PS) tele-monitoring with (AI)/(ML) and (PS) (m u/s) upon (5G IN) via the Cobot PROMETHEUS III (pn 100016) in severe (TRS/ES) seems feasible and, under specific prerequisites, reliable to support the (GHP) and optimize outcomes in adult and pediatric (TRS/ES).}, } @article {pmid39163538, year = {2025}, author = {Komarasamy, D and Ramaganthan, SM and Kandaswamy, DM and Mony, G}, title = {Deep learning and optimization enabled multi-objective for task scheduling in cloud computing.}, journal = {Network (Bristol, England)}, volume = {36}, number = {1}, pages = {79-108}, doi = {10.1080/0954898X.2024.2391395}, pmid = {39163538}, issn = {1361-6536}, mesh = {*Deep Learning ; *Cloud Computing ; Algorithms ; Neural Networks, Computer ; Humans ; }, abstract = {In cloud computing (CC), task scheduling allocates each task to the best suitable resource for execution. This article proposes a model for task scheduling utilizing multi-objective optimization and a deep learning (DL) model. Initially, the multi-objective task scheduling is carried out by the incoming user utilizing the proposed hybrid fractional flamingo beetle optimization (FFBO), which is formed by integrating dung beetle optimization (DBO), flamingo search algorithm (FSA) and fractional calculus (FC). Here, the fitness function depends on reliability, cost, predicted energy, and makespan; the predicted energy is forecasted by a deep residual network (DRN). Thereafter, task scheduling is accomplished based on DL using the proposed deep feedforward neural network fused long short-term memory (DFNN-LSTM), which is the combination of DFNN and LSTM. Moreover, when scheduling the workflow, the task parameters and the virtual machine's (VM) live parameters are taken into consideration. Task parameters are earliest finish time (EFT), earliest start time (EST), task length, task priority, and actual task running time, whereas VM parameters include memory utilization, bandwidth utilization, capacity, and central processing unit (CPU). The proposed model DFNN-LSTM+FFBO has achieved superior makespan, energy, and resource utilization of 0.188, 0.950J, and 0.238, respectively.}, } @article {pmid39160778, year = {2024}, author = {Jat, AS and Grønli, TM and Ghinea, G and Assres, G}, title = {Evolving Software Architecture Design in Telemedicine: A PRISMA-based Systematic Review.}, journal = {Healthcare informatics research}, volume = {30}, number = {3}, pages = {184-193}, pmid = {39160778}, issn = {2093-3681}, support = {//Kristiania University College/ ; }, abstract = {OBJECTIVES: This article presents a systematic review of recent advancements in telemedicine architectures for continuous monitoring, providing a comprehensive overview of the evolving software engineering practices underpinning these systems. The review aims to illuminate the critical role of telemedicine in delivering healthcare services, especially during global health crises, and to emphasize the importance of effectiveness, security, interoperability, and scalability in these systems.
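Komarasamy et al. (pmid 39163538) above describe a fitness function over reliability, cost, predicted energy, and makespan; a weighted-sum sketch follows, with the weights and normalization bounds as illustrative assumptions rather than the paper's formulation.

def fitness(reliability, cost, energy, makespan,
            w=(0.25, 0.25, 0.25, 0.25),
            max_cost=100.0, max_energy=10.0, max_makespan=50.0):
    # Maximize reliability; minimize normalized cost, energy, and makespan.
    # Bounds (max_*) are hypothetical scaling constants for the workload.
    return (w[0] * reliability
            + w[1] * (1.0 - cost / max_cost)
            + w[2] * (1.0 - energy / max_energy)
            + w[3] * (1.0 - makespan / max_makespan))

print(fitness(reliability=0.95, cost=40.0, energy=0.95, makespan=18.8))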

METHODS: A systematic review methodology was employed, adhering to the Preferred Reporting Items for Systematic Reviews and Meta-Analyses framework. As the primary research method, the PubMed, IEEE Xplore, and Scopus databases were searched to identify articles relevant to telemedicine architectures for continuous monitoring. Seventeen articles were selected for analysis, and a methodical approach was employed to investigate and synthesize the findings.

RESULTS: The review identified a notable trend towards the integration of emerging technologies into telemedicine architectures. Key areas of focus include interoperability, security, and scalability. Innovations such as cognitive radio technology, behavior-based control architectures, Health Level Seven International (HL7) Fast Healthcare Interoperability Resources (FHIR) standards, cloud computing, decentralized systems, and blockchain technology are addressing challenges in remote healthcare delivery and continuous monitoring.

CONCLUSIONS: This review highlights major advancements in telemedicine architectures, emphasizing the integration of advanced technologies to improve interoperability, security, and scalability. The findings underscore the successful application of cognitive radio technology, behavior-based control, HL7 FHIR standards, cloud computing, decentralized systems, and blockchain in advancing remote healthcare delivery.}, } @article {pmid39156807, year = {2024}, author = {Cosic, K and Kopilas, V and Jovanovic, T}, title = {War, emotions, mental health, and artificial intelligence.}, journal = {Frontiers in psychology}, volume = {15}, number = {}, pages = {1394045}, pmid = {39156807}, issn = {1664-1078}, abstract = {During the war time dysregulation of negative emotions such as fear, anger, hatred, frustration, sadness, humiliation, and hopelessness can overrule normal societal values, culture, and endanger global peace and security, and mental health in affected societies. Therefore, it is understandable that the range and power of negative emotions may play important roles in consideration of human behavior in any armed conflict. The estimation and assessment of dominant negative emotions during war time are crucial but are challenged by the complexity of emotions' neuro-psycho-physiology. Currently available natural language processing (NLP) tools have comprehensive computational methods to analyze and understand the emotional content of related textual data in war-inflicted societies. Innovative AI-driven technologies incorporating machine learning, neuro-linguistic programming, cloud infrastructure, and novel digital therapeutic tools and applications present an immense potential to enhance mental health care worldwide. This advancement could make mental health services more cost-effective and readily accessible. Due to the inadequate number of psychiatrists and limited psychiatric resources in coping with mental health consequences of war and traumas, new digital therapeutic wearable devices supported by AI tools and means might be promising approach in psychiatry of future. Transformation of negative dominant emotional maps might be undertaken by the simultaneous combination of online cognitive behavioral therapy (CBT) on individual level, as well as usage of emotionally based strategic communications (EBSC) on a public level. The proposed positive emotional transformation by means of CBT and EBSC may provide important leverage in efforts to protect mental health of civil population in war-inflicted societies. AI-based tools that can be applied in design of EBSC stimuli, like Open AI Chat GPT or Google Gemini may have great potential to significantly enhance emotionally based strategic communications by more comprehensive understanding of semantic and linguistic analysis of available text datasets of war-traumatized society. 
A human in the loop, enhanced by ChatGPT and Gemini, can aid in the design and development of emotionally annotated messages that resonate with the targeted population, amplifying the impact of strategic communications in shaping dominant human emotional maps in a more positive direction through CBT and EBSC.}, } @article {pmid39151500, year = {2024}, author = {Ouyang, T and Yang, J and Gu, Z and Zhang, L and Wang, D and Wang, Y and Yang, Y}, title = {Research on privacy protection in the context of healthcare data based on knowledge map.}, journal = {Medicine}, volume = {103}, number = {33}, pages = {e39370}, pmid = {39151500}, issn = {1536-5964}, support = {Grant No.2023Ah040102//Major Scientific Research Project of Anhui Provincial Department of Education/ ; Grant No.2022Ah010038 and No.2023sdxx027//Anhui Province quality projects/ ; Grant no.2021rwzd12//Key humanities projects of Anhui University of Traditional Chinese Medicine/ ; Grant No.JNFX2023020//Middle-aged Young Teacher Training Action Project of Anhui Provincial Department of Education/ ; Grant No.2023jyxm0370//General Project of Teaching Research in Anhui Province/ ; }, mesh = {Humans ; *Big Data ; *Computer Security ; *Privacy ; *Confidentiality ; Bibliometrics ; }, abstract = {With the rapid development of emerging information technologies such as artificial intelligence, cloud computing, and the Internet of Things, the world has entered the era of big data. In the face of growing medical big data, research on the privacy protection of personal information has attracted more and more attention, but few studies have analyzed and forecasted the research hotspots and future development trends of privacy protection. Presently, to systematically and comprehensively summarize the relevant privacy protection literature in the context of big healthcare data, a bibliometric analysis was conducted to clarify the spatial and temporal distribution and research hotspots of privacy protection using the information visualization software CiteSpace. The literature papers related to privacy protection in the Web of Science were collected from 2012 to 2023. Through analysis of the temporal, author, and country distribution of relevant publications, we found that after 2013 research on privacy protection received increasing attention; the core institutions of privacy protection research are universities, but cooperation between countries remains weak. Additionally, keywords like privacy, big data, internet, challenge, care, and information have high centralities and frequency, indicating the research hotspots and research trends in the field of privacy protection. 
These findings provide a comprehensive knowledge structure of privacy protection research in the context of health big data, helping scholars quickly grasp research hotspots and choose future research projects.}, } @article {pmid39150579, year = {2024}, author = {Gehlhaar, DK and Mermelstein, DJ}, title = {FitScore: a fast machine learning-based score for 3D virtual screening enrichment.}, journal = {Journal of computer-aided molecular design}, volume = {38}, number = {1}, pages = {29}, pmid = {39150579}, issn = {1573-4951}, mesh = {*Machine Learning ; Ligands ; *Molecular Docking Simulation ; Binding Sites ; Humans ; Protein Binding ; Proteins/chemistry/metabolism ; Software ; Drug Evaluation, Preclinical/methods ; Drug Discovery/methods ; }, abstract = {Enhancing virtual screening enrichment has become an urgent problem in computational chemistry, driven by increasingly large databases of commercially available compounds, without a commensurate drop in in vitro screening costs. Docking these large databases is possible with cloud-scale computing. However, rapid docking necessitates compromises in scoring, often leading to poor enrichment and an abundance of false positives in docking results. This work describes a new scoring function composed of two parts - a knowledge-based component that predicts the probability of a particular atom type being in a particular receptor environment, and a tunable weight matrix that converts the probability predictions into a dimensionless score suitable for virtual screening enrichment. This score, the FitScore, represents the compatibility between the ligand and the binding site and is capable of a high degree of enrichment across standardized docking test sets.}, } @article {pmid39149018, year = {2024}, author = {Kim, C and Lee, J}, title = {Discovering patterns and trends in customer service technologies patents using large language model.}, journal = {Heliyon}, volume = {10}, number = {14}, pages = {e34701}, pmid = {39149018}, issn = {2405-8440}, abstract = {The definition of service has evolved from a focus on material value in manufacturing before the 2000s to a customer-centric value based on the significant growth of the service industry. Digital transformation has become essential for companies in the service industry due to the incorporation of digital technology through the Fourth Industrial Revolution and COVID-19. This study utilised Bidirectional Encoder Representations from Transformers (BERT) to analyse 3029 international patents related to the customer service industry and digital transformation registered between 2000 and 2022. Through topic modelling, this study identified 10 major topics in the customer service industry and analysed their yearly trends. Our findings show that as of 2022, the trend with the highest frequency is user-centric network service design, while cloud computing has experienced the steepest increase in the last five years. User-centric network services have been steadily developing since the inception of the Internet. Cloud computing is one of the key technologies being developed intensively in 2023 for the digital transformation of customer service. 
This study identifies time series trends of customer service industry patents and suggests the effectiveness of using BERTopic to predict future trends in technology.}, } @article {pmid39147188, year = {2024}, author = {Miguel, S and Ruiz-Benito, P and Rebollo, P and Viana-Soto, A and Mihai, MC and García-Martín, A and Tanase, M}, title = {Forest disturbance regimes and trends in continental Spain (1985-2023) using dense landsat time series.}, journal = {Environmental research}, volume = {262}, number = {Pt 1}, pages = {119802}, doi = {10.1016/j.envres.2024.119802}, pmid = {39147188}, issn = {1096-0953}, mesh = {Spain ; *Forests ; Environmental Monitoring/methods ; Remote Sensing Technology ; Conservation of Natural Resources ; }, abstract = {Forest disturbance regimes across biomes are being altered by interactive effects of global change. Establishing baselines for assessing change requires detailed quantitative data on past disturbance events, but such data are scarce and difficult to obtain over large spatial and temporal scales. The integration of remote sensing with dense time series analysis and cloud computing platforms is enhancing the ability to monitor historical disturbances, and especially non-stand replacing events along climatic gradients. Since the integration of such tools is still scarce in Mediterranean regions, here, we combine dense Landsat time series and the Continuous Change Detection and Classification - Spectral Mixture Analysis (CCDC-SMA) method to monitor forest disturbance in continental Spain from 1985 to 2023. We adapted the CCDC-SMA method for improved disturbance detection creating new spectral libraries representative of the study region, and quantified the year, month, severity, return interval, and type of disturbance (stand replacing, non-stand replacing) at a 30 m resolution. In addition, we characterised forest disturbance regimes and trends (patch size and severity, and frequency of events) of events larger than 0.5 ha at the national scale by biome (Mediterranean and temperate) and forest type (broadleaf, needleleaf and mixed). We quantified more than 2.9 million patches of disturbed forest, covering 4.6 Mha over the region and period studied. Forest disturbances were on average larger but less severe in the Mediterranean than in the temperate biome, and significantly larger and more severe in needleleaf than in mixed and broadleaf forests. Since the late 1980s, forest disturbances have decreased in size and severity while increasing in frequency across all biomes and forest types. These results have important implications as they confirm that disturbance regimes in continental Spain are changing and should therefore be considered in forest strategic planning for policy development and implementation.}, } @article {pmid39146286, year = {2024}, author = {Zohora, MF and Farhin, F and Kaiser, MS}, title = {An enhanced round robin using dynamic time quantum for real-time asymmetric burst length processes in cloud computing environment.}, journal = {PloS one}, volume = {19}, number = {8}, pages = {e0304517}, pmid = {39146286}, issn = {1932-6203}, mesh = {*Cloud Computing ; *Algorithms ; Time Factors ; }, abstract = {Cloud computing is a popular, flexible, scalable, and cost-effective technology in the modern world that provides on-demand services dynamically. 
The dynamic execution of user requests and resource-sharing facilities require proper task scheduling among the available virtual machines, which is a significant issue and plays a crucial role in developing an optimal cloud computing environment. Round Robin is a prevalent scheduling algorithm for fair distribution of resources with a balanced contribution in minimized response time and turnaround time. This paper introduces a new enhanced round-robin approach for task scheduling in cloud computing systems. The proposed algorithm generates and keeps updating a dynamic quantum time for process execution, considering the number of processes available in the system and their burst lengths. Since our method dynamically runs processes, it is appropriate for a real-time environment like cloud computing. A notable part of this approach is its capability to schedule tasks with an asymmetric distribution of burst times, avoiding the convoy effect. The experimental result indicates that the proposed algorithm has outperformed the existing improved round-robin task scheduling approaches in terms of minimized average waiting time, average turnaround time, and number of context switches. Compared against five other enhanced round-robin approaches, it reduced average waiting time by 15.77% and context switching by 20.68% on average. After executing the experiment and comparative study, it can be concluded that the proposed enhanced round-robin scheduling algorithm is optimal, acceptable, and relatively better suited for cloud computing environments.}, } @article {pmid39140427, year = {2024}, author = {Cao, Y and Zhang, Z and Qin, BW and Sang, W and Li, H and Wang, T and Tan, F and Gan, Y and Zhang, X and Liu, T and Xiang, D and Lin, W and Liu, Q}, title = {Physical Reservoir Computing Using van der Waals Ferroelectrics for Acoustic Keyword Spotting.}, journal = {ACS nano}, volume = {18}, number = {34}, pages = {23265-23276}, doi = {10.1021/acsnano.4c06144}, pmid = {39140427}, issn = {1936-086X}, abstract = {Acoustic keyword spotting (KWS) plays a pivotal role in the voice-activated systems of artificial intelligence (AI), allowing for hands-free interactions between humans and smart devices through information retrieval of the voice commands. The cloud computing technology integrated with the artificial neural networks has been employed to execute the KWS tasks, which however suffers from propagation delay and the risk of privacy breach. Here, we report a single-node reservoir computing (RC) system based on the CuInP2S6 (CIPS)/graphene heterostructure planar device for implementing the KWS task with low computation cost. Through deliberately tuning the Schottky barrier height at the ferroelectric CIPS interfaces for the thermionic injection and transport of the electrons, the typical nonlinear current response and fading memory characteristics are achieved in the device. Additionally, the device exhibits diverse synaptic plasticity with an excellent separation capability of the temporal information. We construct a RC system through employing the ferroelectric device as the physical node to spot the acoustic keywords, i.e., the natural numbers from 1 to 9 based on simulation, in which the system demonstrates outstanding performance with high accuracy rate (>94.6%) and recall rate (>92.0%). 
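A minimal simulation of round robin with a dynamic time quantum, illustrating the idea in Zohora et al. (pmid 39146286) above. Recomputing the quantum each cycle as the mean of the remaining bursts is an illustrative rule, not the paper's exact formula; all processes are assumed to arrive at time zero.

from collections import deque

def dynamic_round_robin(bursts):
    remaining = {pid: b for pid, b in enumerate(bursts)}
    queue = deque(remaining)
    clock, finish = 0, {}
    while queue:
        # Dynamic quantum: recomputed each cycle from the still-unfinished bursts
        quantum = max(1, round(sum(remaining[p] for p in queue) / len(queue)))
        for _ in range(len(queue)):           # one pass over the current queue
            pid = queue.popleft()
            run = min(quantum, remaining[pid])
            clock += run
            remaining[pid] -= run
            if remaining[pid] == 0:
                finish[pid] = clock
            else:
                queue.append(pid)
    # Waiting time = turnaround - burst, since all processes arrive at t = 0
    return {pid: finish[pid] - b for pid, b in enumerate(bursts)}

print(dynamic_round_robin([24, 3, 3]))  # asymmetric bursts, no convoy effect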
Our work promises physical RC in single-node configuration as a prospective computing platform to process the acoustic keywords, promoting its applications in the artificial auditory system at the edge.}, } @article {pmid39138951, year = {2024}, author = {Guide, A and Garbett, S and Feng, X and Mapes, BM and Cook, J and Sulieman, L and Cronin, RM and Chen, Q}, title = {Balancing efficacy and computational burden: weighted mean, multiple imputation, and inverse probability weighting methods for item non-response in reliable scales.}, journal = {Journal of the American Medical Informatics Association : JAMIA}, volume = {31}, number = {12}, pages = {2869-2879}, pmid = {39138951}, issn = {1527-974X}, support = {OT2 OD026556/OD/NIH HHS/United States ; U24 OD023121/OD/NIH HHS/United States ; OT2 OD026552/OD/NIH HHS/United States ; OT2 OD026549/OD/NIH HHS/United States ; OT2 OD025337/OD/NIH HHS/United States ; OT2 OD025277/OD/NIH HHS/United States ; OT2 OD026555/OD/NIH HHS/United States ; OT2 OD026550/OD/NIH HHS/United States ; R21MD019103/MD/NIMHD NIH HHS/United States ; OT2 OD026553/OD/NIH HHS/United States ; OT2 OD023205/OD/NIH HHS/United States ; OT2 OD025276/OD/NIH HHS/United States ; OT2 OD026554/OD/NIH HHS/United States ; U24 OD023163/OD/NIH HHS/United States ; OT2 OD023206/OD/NIH HHS/United States ; U24 OD023176/OD/NIH HHS/United States ; OT2 OD026548/OD/NIH HHS/United States ; 3OT2OD035404//Office of the Director: Data and Research Center/ ; U2C OD023196/OD/NIH HHS/United States ; OT2 OD035404/OD/NIH HHS/United States ; OT2 OD025315/OD/NIH HHS/United States ; 3OT2OD035404/NH/NIH HHS/United States ; OT2 OD026551/OD/NIH HHS/United States ; OT2 OD026557/OD/NIH HHS/United States ; R21 MD019103/MD/NIMHD NIH HHS/United States ; }, mesh = {Humans ; Surveys and Questionnaires ; *Probability ; Data Interpretation, Statistical ; Exercise ; }, abstract = {IMPORTANCE: Scales often arise from multi-item questionnaires, yet commonly face item non-response. Traditional solutions use weighted mean (WMean) from available responses, but potentially overlook missing data intricacies. Advanced methods like multiple imputation (MI) address broader missing data, but demand increased computational resources. Researchers frequently use survey data in the All of Us Research Program (All of Us), and it is imperative to determine if the increased computational burden of employing MI to handle non-response is justifiable.

OBJECTIVES: Using the 5-item Physical Activity Neighborhood Environment Scale (PANES) in All of Us, this study assessed the tradeoff between efficacy and computational demands of WMean, MI, and inverse probability weighting (IPW) when dealing with item non-response.

MATERIALS AND METHODS: Synthetic missingness, allowing 1 or more item non-response, was introduced into PANES across 3 missing mechanisms and various missing percentages (10%-50%). Each scenario compared WMean of complete questions, MI, and IPW on bias, variability, coverage probability, and computation time.

RESULTS: All methods showed minimal biases (all <5.5%) under good internal consistency, with WMean suffering the most under poor consistency. IPW showed considerable variability with increasing missing percentage. MI required significantly more computational resources, taking >8000 and >100 times longer than WMean and IPW in full data analysis, respectively.
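A minimal sketch of the WMean estimator compared in this study: the scale score is the mean of the items a respondent actually answered. The 5-item shape matches PANES; the minimum-answered threshold of 3 is an illustrative assumption, not the study's rule.

import numpy as np

def wmean_score(items: np.ndarray, min_answered: int = 3) -> np.ndarray:
    # items: (n_respondents, 5) array with np.nan marking item non-response
    answered = np.sum(~np.isnan(items), axis=1)
    score = np.nanmean(items, axis=1)   # mean over the available responses only
    # Mask respondents with too few answered items (hypothetical cutoff)
    return np.where(answered >= min_answered, score, np.nan)

resp = np.array([[4, 3, np.nan, 2, 4],
                 [1, np.nan, np.nan, np.nan, 2]], dtype=float)
print(wmean_score(resp))  # second respondent answered too few items -> nan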

DISCUSSION AND CONCLUSION: The marginal performance advantages of MI for item non-response in highly reliable scales do not warrant its escalated cloud computational burden in All of Us, particularly when coupled with computationally demanding post-imputation analyses. Researchers using survey scales with low missingness could utilize WMean to reduce computing burden.}, } @article {pmid39138215, year = {2024}, author = {Bontempi, D and Nuernberg, L and Pai, S and Krishnaswamy, D and Thiriveedhi, V and Hosny, A and Mak, RH and Farahani, K and Kikinis, R and Fedorov, A and Aerts, HJWL}, title = {End-to-end reproducible AI pipelines in radiology using the cloud.}, journal = {Nature communications}, volume = {15}, number = {1}, pages = {6931}, pmid = {39138215}, issn = {2041-1723}, support = {HHSN261201500003C/CA/NCI NIH HHS/United States ; HHSN261201500003I/CA/NCI NIH HHS/United States ; 866504//EC | EU Framework Programme for Research and Innovation H2020 | H2020 Priority Excellent Science | H2020 European Research Council (H2020 Excellent Science - European Research Council)/ ; HHSN261201500003l//Foundation for the National Institutes of Health (Foundation for the National Institutes of Health, Inc.)/ ; }, mesh = {*Cloud Computing ; Humans ; *Artificial Intelligence ; Reproducibility of Results ; Deep Learning ; Radiology/methods/standards ; Algorithms ; Neoplasms/diagnostic imaging ; Image Processing, Computer-Assisted/methods ; }, abstract = {Artificial intelligence (AI) algorithms hold the potential to revolutionize radiology. However, a significant portion of the published literature lacks transparency and reproducibility, which hampers sustained progress toward clinical translation. Although several reporting guidelines have been proposed, identifying practical means to address these issues remains challenging. Here, we show the potential of cloud-based infrastructure for implementing and sharing transparent and reproducible AI-based radiology pipelines. We demonstrate end-to-end reproducibility from retrieving cloud-hosted data, through data pre-processing, deep learning inference, and post-processing, to the analysis and reporting of the final results. We successfully implement two distinct use cases, starting from recent literature on AI-based biomarkers for cancer imaging. Using cloud-hosted data and computing, we confirm the findings of these studies and extend the validation to previously unseen data for one of the use cases. Furthermore, we provide the community with transparent and easy-to-extend examples of pipelines impactful for the broader oncology field. Our approach demonstrates the potential of cloud resources for implementing, sharing, and using reproducible and transparent AI pipelines, which can accelerate the translation into clinical solutions.}, } @article {pmid39129832, year = {2024}, author = {Ju, D and Kim, S}, title = {Volatile tin oxide memristor for neuromorphic computing.}, journal = {iScience}, volume = {27}, number = {8}, pages = {110479}, pmid = {39129832}, issn = {2589-0042}, abstract = {The rise of neuromorphic systems has addressed the shortcomings of current computing architectures, especially regarding energy efficiency and scalability. These systems use cutting-edge technologies such as Pt/SnOx/TiN memristors, which efficiently mimic synaptic behavior and provide potential solutions to modern computing challenges. 
Moreover, their unipolar resistive switching ability enables precise modulation of the synaptic weights, facilitating energy-efficient parallel processing that is similar to biological synapses. Additionally, memristors' spike-rate-dependent plasticity enhances the adaptability of neural circuits, offering promising applications in intelligent computing. Integrating memristors into edge computing architectures further highlights their importance in tackling the security and efficiency issues associated with conventional cloud computing models.}, } @article {pmid39124116, year = {2024}, author = {Pazhanivel, DB and Velu, AN and Palaniappan, BS}, title = {Design and Enhancement of a Fog-Enabled Air Quality Monitoring and Prediction System: An Optimized Lightweight Deep Learning Model for a Smart Fog Environmental Gateway.}, journal = {Sensors (Basel, Switzerland)}, volume = {24}, number = {15}, pages = {}, pmid = {39124116}, issn = {1424-8220}, abstract = {Effective air quality monitoring and forecasting are essential for safeguarding public health, protecting the environment, and promoting sustainable development in smart cities. Conventional systems are cloud-based, incur high costs, lack accurate Deep Learning (DL) models for multi-step forecasting, and fail to optimize DL models for fog nodes. To address these challenges, this paper proposes a Fog-enabled Air Quality Monitoring and Prediction (FAQMP) system by integrating the Internet of Things (IoT), Fog Computing (FC), Low-Power Wide-Area Networks (LPWANs), and Deep Learning (DL) for improved accuracy and efficiency in monitoring and forecasting air quality levels. The three-layered FAQMP system includes a low-cost Air Quality Monitoring (AQM) node transmitting data via LoRa to the Fog Computing layer and then the cloud layer for complex processing. The Smart Fog Environmental Gateway (SFEG) in the FC layer introduces efficient Fog Intelligence by employing an optimized lightweight DL-based Sequence-to-Sequence (Seq2Seq) Gated Recurrent Unit (GRU) attention model, enabling real-time processing, accurate forecasting, and timely warnings of dangerous AQI levels while optimizing fog resource usage. Initially, the Seq2Seq GRU Attention model, validated for multi-step forecasting, outperformed the state-of-the-art DL methods with an average RMSE of 5.5576, MAE of 3.4975, MAPE of 19.1991%, R[2] of 0.6926, and Theil's U1 of 0.1325. This model is then made lightweight and optimized using post-training quantization (PTQ), specifically dynamic range quantization, which reduced the model size to less than a quarter of the original and improved execution time by 81.53% while maintaining forecast accuracy. This optimization enables efficient deployment on resource-constrained fog nodes like SFEG by balancing performance and computational efficiency, thereby enhancing the effectiveness of the FAQMP system through efficient Fog Intelligence. 
The FAQMP system, supported by the EnviroWeb application, provides real-time AQI updates, forecasts, and alerts, aiding the government in proactively addressing pollution concerns, maintaining air quality standards, and fostering a healthier and more sustainable environment.}, } @article {pmid39123976, year = {2024}, author = {Villar, E and Martín Toral, I and Calvo, I and Barambones, O and Fernández-Bustamante, P}, title = {Architectures for Industrial AIoT Applications.}, journal = {Sensors (Basel, Switzerland)}, volume = {24}, number = {15}, pages = {}, pmid = {39123976}, issn = {1424-8220}, abstract = {Industry 4.0 introduced new concepts, technologies, and paradigms, such as Cyber Physical Systems (CPSs), Industrial Internet of Things (IIoT) and, more recently, Artificial Intelligence of Things (AIoT). These paradigms ease the creation of complex systems by integrating heterogeneous devices. As a result, the structure of the production systems is changing completely. In this scenario, the adoption of reference architectures based on standards may guide designers and developers to create complex AIoT applications. This article surveys the main reference architectures available for industrial AIoT applications, analyzing their key characteristics, objectives, and benefits; it also presents some use cases that may help designers create new applications. The main goal of this review is to help engineers identify the alternative that best suits every application. The authors conclude that existing reference architectures are a necessary tool for standardizing AIoT applications, since they may guide developers in the process of developing new applications. However, the use of reference architectures in real AIoT industrial applications is still incipient, so more development effort is needed in order for it to be widely adopted.}, } @article {pmid39116433, year = {2024}, author = {Sibanda, K and Ndayizigamiye, P and Twinomurinzi, H}, title = {Industry 4.0 Technologies in Maternal Health Care: Bibliometric Analysis and Research Agenda.}, journal = {JMIR pediatrics and parenting}, volume = {7}, number = {}, pages = {e47848}, pmid = {39116433}, issn = {2561-6722}, abstract = {BACKGROUND: Industry 4.0 (I4.0) technologies have improved operations in health care facilities by optimizing processes, leading to efficient systems and tools to assist health care personnel and patients.
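The dynamic range post-training quantization step described for the FAQMP system (pmid 39124116) above can be sketched with the TensorFlow Lite converter. The toy GRU below is a stand-in for the paper's Seq2Seq GRU attention model; the input shape and file name are assumptions, and depending on the TF version recurrent layers may need extra converter settings (noted in the comments).

import tensorflow as tf

# Stand-in model: 24 time steps of 6 pollutant features -> 6 forecast outputs
model = tf.keras.Sequential([
    tf.keras.layers.GRU(32, input_shape=(24, 6)),
    tf.keras.layers.Dense(6),
])

converter = tf.lite.TFLiteConverter.from_keras_model(model)
converter.optimizations = [tf.lite.Optimize.DEFAULT]  # dynamic range quantization
# Depending on the TF version, recurrent ops may additionally require:
# converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS,
#                                        tf.lite.OpsSet.SELECT_TF_OPS]
tflite_bytes = converter.convert()

with open('gru_dynamic_range.tflite', 'wb') as f:  # hypothetical output path
    f.write(tflite_bytes)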

OBJECTIVE: This study investigates the current implementation and impact of I4.0 technologies within maternal health care, explicitly focusing on transforming care processes, treatment methods, and automated pregnancy monitoring. Additionally, it conducts a thematic landscape mapping, offering a nuanced understanding of this emerging field. Building on this analysis, a future research agenda is proposed, highlighting critical areas for future investigations.

METHODS: A bibliometric analysis of publications retrieved from the Scopus database was conducted to examine how the research into I4.0 technologies in maternal health care evolved from 1985 to 2022. A search strategy was used to screen the eligible publications using the abstract and full-text reading. The most productive and influential journals; authors', institutions', and countries' influence on maternal health care; and current trends and thematic evolution were computed using the Bibliometrix R package (R Core Team).

RESULTS: A total of 1003 unique papers in English were retrieved using the search string, and 136 papers were retained after the inclusion and exclusion criteria were implemented, covering 37 years from 1985 to 2022. The annual growth rate of publications was 9.53%, with 88.9% (n=121) of the publications observed in 2016-2022. In the thematic analysis, 4 clusters were identified-artificial neural networks, data mining, machine learning, and the Internet of Things. Artificial intelligence, deep learning, risk prediction, digital health, telemedicine, wearable devices, mobile health care, and cloud computing remained the dominant research themes in 2016-2022.

CONCLUSIONS: This bibliometric analysis reviews the state of the art in the evolution and structure of I4.0 technologies in maternal health care and how they may be used to optimize operational processes. A conceptual framework with 4 performance factors (risk prediction, hospital care, health record management, and self-care) is suggested for process improvement. A research agenda is also proposed for governance, adoption, infrastructure, privacy, and security.}, } @article {pmid39111449, year = {2024}, author = {Wan, L and Kendall, AD and Rapp, J and Hyndman, DW}, title = {Mapping agricultural tile drainage in the US Midwest using explainable random forest machine learning and satellite imagery.}, journal = {The Science of the total environment}, volume = {950}, number = {}, pages = {175283}, doi = {10.1016/j.scitotenv.2024.175283}, pmid = {39111449}, issn = {1879-1026}, abstract = {There has been an increase in tile drained area across the US Midwest and other regions worldwide due to agricultural expansion, intensification, and climate variability. Despite this growth, spatially explicit tile drainage maps remain scarce, which limits the accuracy of hydrologic modeling and implementation of nutrient reduction strategies. Here, we developed a machine-learning model to provide a Spatially Explicit Estimate of Tile Drainage (SEETileDrain) across the US Midwest in 2017 at a 30-m resolution. This model used 31 satellite-derived and environmental features after removing less important and highly correlated features. It was trained with 60,938 tile and non-tile ground truth points within the Google Earth Engine cloud-computing platform. We also used multiple feature importance metrics and Accumulated Local Effects to interpret the machine learning model. The results show that our model achieved good accuracy, with 96% of points classified correctly and an F1 score of 0.90. When tile drainage area is aggregated to the county scale, it agreed well (r[2] = 0.69) with the reported area from the Ag Census. We found that Land Surface Temperature (LST) along with climate- and soil-related features were the most important factors for classification. The top-ranked feature is the median summer nighttime LST, followed by median summer soil moisture percent. This study demonstrates the potential of applying satellite remote sensing to map spatially explicit agricultural tile drainage across large regions. The results should be useful for land use change monitoring and hydrologic and nutrient models, including those designed to achieve cost-effective agricultural water and nutrient management strategies. 
The algorithms developed here should also be applicable to other remote sensing mapping applications.}, } @article {pmid39107423, year = {2024}, author = {Ramdani, F and Setiani, P and Sianturi, R}, title = {Towards understanding climate change impacts: monitoring the vegetation dynamics of terrestrial national parks in Indonesia.}, journal = {Scientific reports}, volume = {14}, number = {1}, pages = {18257}, pmid = {39107423}, issn = {2045-2322}, mesh = {*Climate Change ; Indonesia ; *Parks, Recreational ; *Conservation of Natural Resources ; Seasons ; Environmental Monitoring/methods ; Ecosystem ; Plants ; }, abstract = {Monitoring vegetation dynamics in terrestrial national parks (TNPs) is crucial for ensuring sustainable environmental management, mitigating the potential negative impacts of short- and long-term disturbances, and understanding the effects of climate change within natural and protected areas. This study aims to monitor the vegetation dynamics of TNPs in Indonesia by first categorizing them into the regions of Sumatra, Jawa, Kalimantan, Sulawesi, and Eastern Indonesia and then applying ready-to-use MODIS EVI time-series imagery (MOD13Q1) taken from 2000 to 2022 on the GEE cloud-computing platform. Specifically, this research investigates the greening and browning fraction trends using Sen's slope, considers seasonality by analyzing the maximum and minimum EVI values, and assesses anomalous years by comparing the annual time series and the long-term median EVI value. The findings reveal significantly increasing greening trends in most TNPs, except Danau Sentarum, from 2000 to 2022. The seasonality analysis shows that most TNPs exhibit peak and trough greenness at the end of the rainy and dry seasons, respectively, as the vegetation responds to increases and decreases in precipitation. Anomalies in seasonality attributable to climate change were detected in all of the regions. To increase TNPs' resilience, suggested measures include active reforestation, implementation of Assisted Natural Regeneration, stronger enforcement of fundamental managerial tasks, and forest fire management.}, } @article {pmid39101486, year = {2024}, author = {Ruprecht, NA and Kennedy, JD and Bansal, B and Singhal, S and Sens, D and Maggio, A and Doe, V and Hawkins, D and Campbel, R and O'Connell, K and Gill, JS and Schaefer, K and Singhal, SK}, title = {Transcriptomics and epigenetic data integration learning module on Google Cloud.}, journal = {Briefings in bioinformatics}, volume = {25}, number = {Supplement_1}, pages = {}, pmid = {39101486}, issn = {1477-4054}, support = {P20 GM103442/GM/NIGMS NIH HHS/United States ; P20GM103442//National Institute of General Medical Sciences of the National Institutes of Health/ ; }, mesh = {Humans ; *Cloud Computing ; *Epigenomics/methods ; Epigenesis, Genetic ; Transcriptome ; Computational Biology/methods ; Gene Expression Profiling/methods ; Software ; Data Mining/methods ; }, abstract = {Multi-omics (genomics, transcriptomics, epigenomics, proteomics, metabolomics, etc.) research approaches are vital for understanding the hierarchical complexity of human biology and have proven to be extremely valuable in cancer research and precision medicine. Emerging scientific advances in recent years have made high-throughput genome-wide sequencing a central focus in molecular research by allowing for the collective analysis of various kinds of molecular biological data from different types of specimens in a single tissue or even at the level of a single cell.
Additionally, with the help of improved computational resources and data mining, researchers are able to integrate data from different multi-omics regimes to identify new prognostic, diagnostic, or predictive biomarkers, uncover novel therapeutic targets, and develop more personalized treatment protocols for patients. For the research community to parse the scientifically and clinically meaningful information out of all the biological data being generated each day more efficiently and with less wasted resources, familiarity and comfort with advanced analytical tools such as the Google Cloud Platform become imperative. This project is an interdisciplinary, cross-organizational effort to provide a guided learning module for integrating transcriptomics and epigenetics data analysis protocols into a comprehensive analysis pipeline for users to implement in their own work, utilizing the cloud computing infrastructure on Google Cloud. The learning module consists of three submodules that guide the user through tutorial examples that illustrate the analysis of RNA-sequencing and Reduced-Representation Bisulfite Sequencing data. The examples are in the form of breast cancer case studies, and the data sets were procured from the public repository Gene Expression Omnibus. The first submodule is devoted to transcriptomics analysis with the RNA sequencing data, the second submodule focuses on epigenetics analysis using the DNA methylation data, and the third submodule integrates the two methods for a deeper biological understanding. The modules begin with data collection and preprocessing, with further downstream analysis performed in a Vertex AI Jupyter notebook instance with an R kernel. Analysis results are returned to Google Cloud buckets for storage and visualization, removing the computational strain from local resources. The final product is a start-to-finish tutorial for researchers with limited experience in multi-omics to integrate transcriptomics and epigenetics data analysis into a comprehensive pipeline to perform their own biological research. This manuscript describes the development of a resource module that is part of a learning platform named ``NIGMS Sandbox for Cloud-based Learning'' https://github.com/NIGMS/NIGMS-Sandbox. The overall genesis of the Sandbox is described in the editorial NIGMS Sandbox [16] at the beginning of this Supplement. This module delivers learning materials on the integration of transcriptomics and epigenetics data analysis in an interactive format that uses appropriate cloud resources for data access and analyses.}, } @article {pmid39098886, year = {2024}, author = {John, J and John Singh, K}, title = {Trust value evaluation of cloud service providers using fuzzy inference based analytical process.}, journal = {Scientific reports}, volume = {14}, number = {1}, pages = {18028}, pmid = {39098886}, issn = {2045-2322}, abstract = {Users can purchase virtualized computer resources using the cloud computing concept, which is a novel and innovative way of computing. It offers numerous advantages for the IT and healthcare industries over traditional methods. However, a lack of trust between cloud service users (CSUs) and cloud service providers (CSPs) is hindering the widespread adoption of cloud computing across industries. Since cloud computing offers a wide range of trust models and strategies, it is essential to analyze the service using a detailed methodology in order to choose the appropriate cloud service for various user types.
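The fuzzy inference step of the trust-evaluation study above can be sketched with the scikit-fuzzy package; the universes, membership functions, and rules below are invented for illustration (the authors report a MATLAB simulation), and only two of the five QoS attributes are modeled:

import numpy as np
import skfuzzy as fuzz
from skfuzzy import control as ctrl

# Two QoS antecedents and a trust consequent on 0-10 universes (assumed ranges)
security = ctrl.Antecedent(np.arange(0, 11, 1), 'security')
performance = ctrl.Antecedent(np.arange(0, 11, 1), 'performance')
trust = ctrl.Consequent(np.arange(0, 11, 1), 'trust')

security.automf(3)      # generates 'poor', 'average', 'good' membership functions
performance.automf(3)
trust['low'] = fuzz.trimf(trust.universe, [0, 0, 5])
trust['medium'] = fuzz.trimf(trust.universe, [2, 5, 8])
trust['high'] = fuzz.trimf(trust.universe, [5, 10, 10])

rules = [
    ctrl.Rule(security['good'] & performance['good'], trust['high']),
    ctrl.Rule(security['average'] | performance['average'], trust['medium']),
    ctrl.Rule(security['poor'], trust['low']),
]

sim = ctrl.ControlSystemSimulation(ctrl.ControlSystem(rules))
sim.input['security'] = 8.5
sim.input['performance'] = 7.0
sim.compute()
print(round(sim.output['trust'], 2))  # crisp trust score for the provider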
To choose appropriately, it is vital to identify a comprehensive set of elements that are both necessary and sufficient for evaluating any cloud service. As a result, this study suggests an accurate, fuzzy logic-based trust evaluation model for evaluating the trustworthiness of a cloud service provider. Here, we examine how fuzzy logic raises the efficiency of trust evaluation. Trust is assessed using Quality of Service (QoS) characteristics like security, privacy, dynamicity, data integrity, and performance. The outcomes of a MATLAB simulation demonstrate the viability of the suggested strategy in a cloud setting.}, } @article {pmid39097607, year = {2024}, author = {Zhang, H and Li, J and Yang, H}, title = {Cloud computing load prediction method based on CNN-BiLSTM model under low-carbon background.}, journal = {Scientific reports}, volume = {14}, number = {1}, pages = {18004}, pmid = {39097607}, issn = {2045-2322}, support = {XJ2023004301//Basic scientific research business fee of central colleges and universities/ ; }, abstract = {With the establishment of the "double carbon" goal, various industries are actively exploring ways to reduce carbon emissions. Cloud data centers, represented by cloud computing, often have the problem of mismatch between load requests and resource supply, resulting in excessive carbon emissions. Based on this, this paper proposes a complete method for cloud computing carbon emission prediction. First, a combined convolutional neural network and bidirectional long short-term memory network (CNN-BiLSTM) model is used to predict the cloud computing load. Real-time power estimates are obtained from the predicted load, and carbon emission predictions are then derived from the power calculation. A dynamic server carbon emission prediction model is developed so that server carbon emissions change with CPU utilization, serving the goal of low-carbon emission reduction. In this paper, Google cluster data is used to predict the load. The experimental results show that the CNN-BiLSTM combined model has a good prediction effect. Compared with the multi-layer feed-forward neural network model (BP), the long short-term memory network model (LSTM), the bidirectional long short-term memory network model (BiLSTM), and the modal decomposition and convolution long time series neural network model (CEEMDAN-ConvLSTM), the MSE index decreased by 52%, 50%, 34%, and 45%, respectively.}, } @article {pmid39092509, year = {2024}, author = {Okoniewski, MJ and Wiegand, A and Schmid, DC and Bolliger, C and Bovino, C and Belluco, M and Wüst, T and Byrde, O and Maffioletti, S and Rinn, B}, title = {Leonhard Med, a trusted research environment for processing sensitive research data.}, journal = {Journal of integrative bioinformatics}, volume = {21}, number = {3}, pages = {}, pmid = {39092509}, issn = {1613-4516}, mesh = {*Computer Security ; Humans ; Confidentiality ; Biomedical Research ; Software ; }, abstract = {This paper provides an overview of the development and operation of the Leonhard Med Trusted Research Environment (TRE) at ETH Zurich. Leonhard Med gives scientific researchers the ability to securely work on sensitive research data. We give an overview of the user perspective, the legal framework for processing sensitive data, design history, current status, and operations.
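The CNN-BiLSTM load predictor described above follows a common pattern: one-dimensional convolution extracts local temporal features that feed a bidirectional LSTM. A minimal Keras sketch, with the window length and layer widths as placeholders rather than the paper's settings:

import tensorflow as tf
from tensorflow.keras import layers, models

WINDOW, N_FEATURES = 48, 1  # 48 past load samples, univariate series (assumed)

model = models.Sequential([
    layers.Input(shape=(WINDOW, N_FEATURES)),
    layers.Conv1D(64, kernel_size=3, activation='relu'),  # local temporal patterns
    layers.MaxPooling1D(pool_size=2),
    layers.Bidirectional(layers.LSTM(64)),                # long-range dependencies
    layers.Dense(32, activation='relu'),
    layers.Dense(1),                                      # next-step load estimate
])
model.compile(optimizer='adam', loss='mse')
model.summary()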
Leonhard Med is an efficient, highly secure Trusted Research Environment for data processing, hosted at ETH Zurich and operated by the Scientific IT Services (SIS) of ETH. It provides a full stack of security controls that allow researchers to store, access, manage, and process sensitive data according to Swiss legislation and ETH Zurich Data Protection policies. In addition, Leonhard Med fulfills the BioMedIT Information Security Policies and is compatible with international data protection laws, and it can therefore be utilized within the scope of national and international collaborative research projects. Initially designed as a "bare-metal" High-Performance Computing (HPC) platform to achieve maximum performance, Leonhard Med was later re-designed as a virtualized, private cloud platform to offer more flexibility to its customers. Sensitive data can be analyzed in secure, segregated spaces called tenants. Technical and Organizational Measures (TOMs) are in place to assure the confidentiality, integrity, and availability of sensitive data. At the same time, Leonhard Med ensures broad access to cutting-edge research software, especially for the analysis of human omics data and other personalized health applications.}, } @article {pmid39088558, year = {2024}, author = {Tawfik, M}, title = {Optimized intrusion detection in IoT and fog computing using ensemble learning and advanced feature selection.}, journal = {PloS one}, volume = {19}, number = {8}, pages = {e0304082}, pmid = {39088558}, issn = {1932-6203}, mesh = {*Cloud Computing ; *Internet of Things ; Computer Security ; Neural Networks, Computer ; Algorithms ; Machine Learning ; }, abstract = {The proliferation of Internet of Things (IoT) devices and fog computing architectures has introduced major security and cyber threats. Intrusion detection systems have become effective in monitoring network traffic and activities to identify anomalies that are indicative of attacks. However, constraints such as limited computing resources at fog nodes render conventional intrusion detection techniques impractical. This paper proposes a novel framework that integrates stacked autoencoders, CatBoost, and an optimised transformer-CNN-LSTM ensemble tailored for intrusion detection in fog and IoT networks. Autoencoders extract robust features from high-dimensional traffic data while reducing dimensionality for efficiency at fog nodes. CatBoost refines features through predictive selection. The ensemble model combines self-attention, convolutions, and recurrence for comprehensive traffic analysis in the cloud. Evaluations on the NSL-KDD, UNSW-NB15, and AWID benchmarks demonstrate an accuracy of over 99% in detecting threats across traditional, hybrid enterprise, and wireless environments. Integrated edge preprocessing and cloud-based ensemble learning pipelines enable efficient and accurate anomaly detection.
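A compact sketch of the edge-side pipeline described above: a stacked autoencoder compresses traffic features, and CatBoost then refines and classifies the encoded representation. The input width loosely echoes NSL-KDD's 41 features, and the data are random stand-ins:

import numpy as np
import tensorflow as tf
from tensorflow.keras import layers, models
from catboost import CatBoostClassifier

X = np.random.rand(1000, 41).astype('float32')  # placeholder for preprocessed flows
y = np.random.randint(0, 2, 1000)               # 0 = benign, 1 = attack (synthetic)

# Stacked autoencoder: 41 -> 24 -> 12 -> 24 -> 41
inp = layers.Input(shape=(41,))
h = layers.Dense(24, activation='relu')(inp)
code = layers.Dense(12, activation='relu')(h)   # bottleneck used at fog nodes
h = layers.Dense(24, activation='relu')(code)
out = layers.Dense(41, activation='sigmoid')(h)
autoencoder = models.Model(inp, out)
autoencoder.compile(optimizer='adam', loss='mse')
autoencoder.fit(X, X, epochs=5, batch_size=64, verbose=0)

encoder = models.Model(inp, code)
Z = encoder.predict(X, verbose=0)               # compressed features

clf = CatBoostClassifier(iterations=200, verbose=0)
clf.fit(Z, y)
print(clf.feature_importances_)                 # predictive feature refinement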
The results highlight the viability of securing real-world fog and IoT infrastructure against continuously evolving cyber-attacks.}, } @article {pmid39081193, year = {2024}, author = {Mer, P and Limbachiya, C}, title = {Electron-driven molecular processes for cyanopolyacetylenes HC2n+1N (n = 3, 4, and 5).}, journal = {Physical chemistry chemical physics : PCCP}, volume = {26}, number = {32}, pages = {21504-21512}, doi = {10.1039/d4cp02665a}, pmid = {39081193}, issn = {1463-9084}, abstract = {Linear carbon series cyanopolyacetylenes (HC2n+1N) (n = 3, 4, and 5) are astromolecules found in the atmosphere of Titan and interstellar media such as TMC-1 (Taurus molecular cloud-1). All these compounds are also detected in IRC+10216. In the present work, we comprehensively investigate electron interaction with important cyanopolyacetylene compounds, viz. HC7N (cyano-tri-acetylene), HC9N (cyano-tetra-acetylene), and HC11N (cyano-penta-acetylene). The study covers incident electron energies ranging from the ionization threshold to 5 keV. Various electron-driven molecular processes are quantified in terms of total cross-sections. The quantum spherical complex optical potential (SCOP) is used to determine elastic (Qel) and inelastic (Qinel) cross-sections. Ionization is the most important inelastic effect that opens various chemical pathways for the generation of different molecular species; we computed the ionization cross-section (Qion) and discrete electronic excitation cross-section (ΣQexc) using the complex scattering potential-ionization contribution (CSP-ic) method. The cyanopolyacetylene compounds are difficult to handle experimentally owing to the health risks involved. Therefore, there are no prior experimental data available for these molecules; only Qion have been reported theoretically. Thus, the present work is the first report computing Qel, Qinel, ΣQexc, and QT. In order to provide an alternative approach and further validation of the present work, we employed our recently developed two-parameter semi-empirical method (2p-SEM) to compute Qel and QT. Additionally, we predict the polarizability of the HC11N molecule, which has not been reported in the existing literature. This prediction is based on a correlation study of polarizabilities of molecules with Qion values from the same series of molecules.}, } @article {pmid39078761, year = {2025}, author = {Lee, SY and Ku, MY and Tseng, WC and Chen, JY}, title = {AI Accelerator With Ultralightweight Time-Period CNN-Based Model for Arrhythmia Classification.}, journal = {IEEE transactions on biomedical circuits and systems}, volume = {19}, number = {1}, pages = {16-27}, doi = {10.1109/TBCAS.2024.3435718}, pmid = {39078761}, issn = {1940-9990}, mesh = {Humans ; *Arrhythmias, Cardiac/diagnosis/classification/physiopathology ; Electrocardiography/methods ; *Neural Networks, Computer ; Algorithms ; Signal Processing, Computer-Assisted ; *Artificial Intelligence ; Databases, Factual ; }, abstract = {This work proposes a classification system for arrhythmias, aiming to enhance the efficiency of the diagnostic process for cardiologists. The proposed algorithm includes a naive preprocessing procedure for electrocardiography (ECG) data applicable to various ECG databases. Additionally, this work proposes an ultralightweight model for arrhythmia classification based on a convolutional neural network and incorporating R-peak interval features to represent long-term rhythm information, thereby improving the model's classification performance.
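The R-peak interval features mentioned above can be computed with a basic peak detector; the sampling rate matches MIT-BIH records (360 Hz), while the detection thresholds and the synthetic input are illustrative assumptions:

import numpy as np
from scipy.signal import find_peaks

FS = 360  # Hz, MIT-BIH sampling rate

def rr_features(ecg: np.ndarray) -> dict:
    """Detect R peaks and summarize R-R intervals for long-term rhythm cues."""
    peaks, _ = find_peaks(ecg, distance=int(0.25 * FS),
                          height=np.percentile(ecg, 95))
    rr = np.diff(peaks) / FS  # R-R intervals in seconds
    return {'mean_rr': float(rr.mean()), 'sdnn': float(rr.std()),
            'n_beats': int(len(peaks))}

ecg = np.random.randn(10 * FS)  # stand-in signal; real input is a filtered ECG lead
print(rr_features(ecg))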
The proposed model is trained and tested using the MIT-BIH and NCKU-CBIC databases in accordance with the classification standards of the Association for the Advancement of Medical Instrumentation (AAMI), achieving high accuracies of 98.32% and 97.1%. This work applies the arrhythmia classification algorithm to a web-based system, thus providing a graphical interface. The cloud-based execution of automated artificial intelligence (AI) classification allows cardiologists and patients to view ECG wave conditions instantly, thereby markedly enhancing the quality of medical examination. This work also designs a customized integrated circuit for the hardware implementation of an AI accelerator. The accelerator utilizes a parallelized processing element array architecture to perform convolution and fully connected layer operations. It introduces hybrid stationary techniques, combining input and weight stationary modes to increase data reuse drastically and reduce hardware execution cycles and power consumption, ultimately achieving high-performance computing. This accelerator is implemented as a chip using the TSMC 180 nm CMOS process. It exhibits a power consumption of 122 µW, a classification latency of 6.8 ms, and an energy efficiency of 0.83 µJ/classification.}, } @article {pmid39071997, year = {2024}, author = {Rehman, SU and Sadek, I and Huang, B and Manickam, S and Mahmoud, LN}, title = {IoT-based emergency cardiac death risk rescue alert system.}, journal = {MethodsX}, volume = {13}, number = {}, pages = {102834}, pmid = {39071997}, issn = {2215-0161}, abstract = {The use of technology in healthcare is one of the most critical application areas today. With the development of medical applications, people's quality of life has improved. However, it is impractical and unnecessary for medium-risk people to receive specialized daily hospital monitoring. Due to their health status, they will be exposed to a high risk of severe health damage or even life-threatening conditions without monitoring. Therefore, remote, real-time, low-cost, wearable, and effective monitoring is ideal for this problem. Many researchers mentioned that their studies could use electrocardiogram (ECG) detection to discover emergencies. However, how to respond to discovered emergencies in household life is still a research gap in this field. • This paper proposes real-time monitoring of ECG signals, which are sent to the cloud for Sudden Cardiac Death (SCD) prediction. • Unlike previous studies, the proposed system has an additional emergency response mechanism to alert nearby community healthcare workers when SCD is predicted to occur.}, } @article {pmid39056390, year = {2024}, author = {Bigi, F and Pozdnyakov, SN and Ceriotti, M}, title = {Wigner kernels: Body-ordered equivariant machine learning without a basis.}, journal = {The Journal of chemical physics}, volume = {161}, number = {4}, pages = {}, doi = {10.1063/5.0208746}, pmid = {39056390}, issn = {1089-7690}, abstract = {Machine-learning models based on a point-cloud representation of a physical object are ubiquitous in scientific applications and particularly well-suited to the atomic-scale description of molecules and materials. Among the many different approaches that have been pursued, the description of local atomic environments in terms of their discretized neighbor densities has been used widely and very successfully. We propose a novel density-based method, which involves computing "Wigner kernels."
These are fully equivariant and body-ordered kernels that can be computed iteratively at a cost that is independent of the basis used to discretize the density and grows only linearly with the maximum body-order considered. Wigner kernels represent the infinite-width limit of feature-space models, whose dimensionality and computational cost instead scale exponentially with the increasing order of correlations. We present several examples of the accuracy of models based on Wigner kernels in chemical applications, for both scalar and tensorial targets, reaching an accuracy that is competitive with state-of-the-art deep-learning architectures. We discuss the broader relevance of these findings to equivariant geometric machine-learning.}, } @article {pmid39054942, year = {2024}, author = {Sharma, S and Tyagi, S}, title = {A fourfold-objective-based cloud privacy preservation model with proposed association rule hiding and deep learning assisted optimal key generation.}, journal = {Network (Bristol, England)}, volume = {}, number = {}, pages = {1-36}, doi = {10.1080/0954898X.2024.2378836}, pmid = {39054942}, issn = {1361-6536}, abstract = {Numerous studies have been conducted in an attempt to preserve cloud privacy, yet the majority of cutting-edge solutions fall short when it comes to handling sensitive data. This research proposes a "privacy preservation model in the cloud environment". The four stages of the recommended security preservation methodology are "identification of sensitive data, generation of an optimal tuned key, suggested data sanitization, and data restoration". Initially, the owner's data enters the sensitive data identification process. The sensitive information in the input (owner's data) is identified via an Augmented Dynamic Itemset Counting (ADIC) based Associative Rule Mining Model. Subsequently, the identified sensitive data are sanitized via the newly created tuned key. The tuned key is generated with a deep learning approach based on a new fourfold-objective hybrid optimization algorithm. The optimally tuned key is generated with an LSTM on the basis of the fourfold objectives and the new hybrid MUAOA. The created keys, as well as the generated sensitive rules, are fed into the deep learning model. The MUAOA technique is a conceptual blend of the standard AOA and CMBO. As a result, unauthorized people will be unable to access information. Finally, a comparative evaluation shows that the proposed LSTM+MUAOA achieves a higher privacy value of about 5.21 compared with other existing models.}, } @article {pmid39049325, year = {2024}, author = {Chen, SY and Tu, MH}, title = {Use Mobile Apps to Link to Google Forms to Conduct Online Surveys.}, journal = {Studies in health technology and informatics}, volume = {315}, number = {}, pages = {567-568}, doi = {10.3233/SHTI240219}, pmid = {39049325}, issn = {1879-8365}, mesh = {Taiwan ; Humans ; *Mobile Applications ; Surveys and Questionnaires ; Coronary Artery Disease ; Anxiety ; Male ; Female ; Middle Aged ; Internet ; }, abstract = {The study aimed to evaluate changes in anxiety levels in patients with coronary artery disease before and after cardiac catheterization. The mobile applications LINE and Google Forms were used to collect online data. A total of 188 patients participated in the study conducted at a regional teaching hospital in eastern Taiwan, and 51 of them completed the questionnaire twice, with a response rate of 27.1%.
Although the second round suffered from incomplete data and a low response rate, this study shows that online research methodology can still be improved and that using electronic questionnaires for data collection and statistical analysis reduces the risk of errors in online research and saves time in documentation. It is recommended to provide clear and detailed instructions when conducting online surveys and to review them carefully upon completion to ensure the completeness of the data collected.}, } @article {pmid39041916, year = {2024}, author = {Nguyen, H and Pham, VD and Nguyen, H and Tran, B and Petereit, J and Nguyen, T}, title = {CCPA: cloud-based, self-learning modules for consensus pathway analysis using GO, KEGG and Reactome.}, journal = {Briefings in bioinformatics}, volume = {25}, number = {Supplement_1}, pages = {}, pmid = {39041916}, issn = {1477-4054}, support = {GM103440 and 1R44GM152152-01/GM/NIGMS NIH HHS/United States ; 2343019 and 2203236//National Science Foundation/ ; 80NSSC22M0255/NASA/NASA/United States ; R44 GM152152/GM/NIGMS NIH HHS/United States ; U01 CA274573/CA/NCI NIH HHS/United States ; 1U01CA274573-01A1/CA/NCI NIH HHS/United States ; P20 GM103440/GM/NIGMS NIH HHS/United States ; }, mesh = {*Cloud Computing ; *Software ; Humans ; Computational Biology/methods/education ; Animals ; Gene Ontology ; }, abstract = {This manuscript describes the development of a resource module that is part of a learning platform named 'NIGMS Sandbox for Cloud-based Learning' (https://github.com/NIGMS/NIGMS-Sandbox). The module delivers learning materials on Cloud-based Consensus Pathway Analysis in an interactive format that uses appropriate cloud resources for data access and analyses. Pathway analysis is important because it allows us to gain insights into biological mechanisms underlying conditions. However, the availability of many pathway analysis methods, the requirement of coding skills, and the focus of current tools on only a few species all make it very difficult for biomedical researchers to self-learn and perform pathway analysis efficiently. Furthermore, there is a lack of tools that allow researchers to compare analysis results obtained from different experiments and different analysis methods to find consensus results. To address these challenges, we have designed a cloud-based, self-learning module that provides consensus results among established, state-of-the-art pathway analysis techniques to provide students and researchers with necessary training and example materials. The training module consists of five Jupyter Notebooks that provide complete tutorials for the following tasks: (i) process expression data, (ii) perform differential analysis, visualize and compare the results obtained from four differential analysis methods (limma, t-test, edgeR, DESeq2), (iii) process three pathway databases (GO, KEGG and Reactome), (iv) perform pathway analysis using eight methods (ORA, CAMERA, KS test, Wilcoxon test, FGSEA, GSA, SAFE and PADOG) and (v) combine results of multiple analyses. We also provide examples, source code, explanations and instructional videos for trainees to complete each Jupyter Notebook. The module supports analysis for many model (e.g., human, mouse, fruit fly, zebrafish) and non-model species. The module is publicly available at https://github.com/NIGMS/Consensus-Pathway-Analysis-in-the-Cloud.
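Of the eight pathway analysis methods listed above, ORA is the simplest to state: each pathway reduces to a hypergeometric test on the overlap between the pathway's genes and the differentially expressed (DE) genes. A minimal sketch with toy numbers:

from scipy.stats import hypergeom

def ora_pvalue(n_universe: int, n_pathway: int, n_de: int, n_overlap: int) -> float:
    """P(X >= n_overlap) for X ~ Hypergeom(n_universe, n_pathway, n_de)."""
    return hypergeom.sf(n_overlap - 1, n_universe, n_pathway, n_de)

# Toy example: 20000 measured genes, a 150-gene pathway, 500 DE genes, 12 overlapping
print(f"{ora_pvalue(20000, 150, 500, 12):.3g}")  # small p => pathway over-represented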
}, } @article {pmid39041915, year = {2024}, author = {Woessner, AE and Anjum, U and Salman, H and Lear, J and Turner, JT and Campbell, R and Beaudry, L and Zhan, J and Cornett, LE and Gauch, S and Quinn, KP}, title = {Identifying and training deep learning neural networks on biomedical-related datasets.}, journal = {Briefings in bioinformatics}, volume = {25}, number = {Supplement_1}, pages = {}, pmid = {39041915}, issn = {1477-4054}, support = {P20 GM139768/GM/NIGMS NIH HHS/United States ; R01EB031032/NH/NIH HHS/United States ; 3P20GM103429-21S2//National Institutes of General Medical Sciences (NIGMS)/ ; NIH P20GM139768//Arkansas Integrative Metabolic Research Center/ ; }, mesh = {*Deep Learning ; *Neural Networks, Computer ; Humans ; Biomedical Research ; Algorithms ; Cloud Computing ; }, abstract = {This manuscript describes the development of a resources module that is part of a learning platform named 'NIGMS Sandbox for Cloud-based Learning' https://github.com/NIGMS/NIGMS-Sandbox. The overall genesis of the Sandbox is described in the editorial NIGMS Sandbox at the beginning of this Supplement. This module delivers learning materials on implementing deep learning algorithms for biomedical image data in an interactive format that uses appropriate cloud resources for data access and analyses. Biomedical-related datasets are widely used in both research and clinical settings, but the ability for professionally trained clinicians and researchers to interpret datasets becomes difficult as the size and breadth of these datasets increases. Artificial intelligence, and specifically deep learning neural networks, have recently become an important tool in novel biomedical research. However, use is limited due to their computational requirements and confusion regarding different neural network architectures. The goal of this learning module is to introduce types of deep learning neural networks and cover practices that are commonly used in biomedical research. This module is subdivided into four submodules that cover classification, augmentation, segmentation and regression. Each complementary submodule was written on the Google Cloud Platform and contains detailed code and explanations, as well as quizzes and challenges to facilitate user training. Overall, the goal of this learning module is to enable users to identify and integrate the correct type of neural network with their data while highlighting the ease-of-use of cloud computing for implementing neural networks.
}, } @article {pmid39041914, year = {2024}, author = {O'Connell, KA and Kopchick, B and Carlson, T and Belardo, D and Byrum, SD}, title = {Understanding proteome quantification in an interactive learning module on Google Cloud Platform.}, journal = {Briefings in bioinformatics}, volume = {25}, number = {Supplement_1}, pages = {}, pmid = {39041914}, issn = {1477-4054}, support = {//UAMS Winthrop P. Rockefeller Cancer Institute/ ; OIA-1946391//National Science Foundation Award/ ; R24GM137786//National Institutes of Health National Institute of General Medical Sciences (NIH/NIGMS)/ ; }, mesh = {*Cloud Computing ; *Proteome/metabolism ; *Proteomics/methods ; *Software ; Mass Spectrometry ; Humans ; }, abstract = {This manuscript describes the development of a resource module that is part of a learning platform named 'NIGMS Sandbox for Cloud-based Learning' https://github.com/NIGMS/NIGMS-Sandbox. The overall genesis of the Sandbox is described in the editorial NIGMS Sandbox at the beginning of this Supplement. This module delivers learning materials on protein quantification in an interactive format that uses appropriate cloud resources for data access and analyses. Quantitative proteomics is a rapidly growing discipline due to the cutting-edge technologies of high resolution mass spectrometry. There are many data types to consider for proteome quantification including data dependent acquisition, data independent acquisition, multiplexing with Tandem Mass Tag reporter ions, spectral counts, and more. As part of the NIH NIGMS Sandbox effort, we developed a learning module to introduce students to mass spectrometry terminology, normalization methods, statistical designs, and basics of R programming. By utilizing the Google Cloud environment, the learning module is easily accessible without the need for complex installation procedures. The proteome quantification module demonstrates the analysis using a provided TMT10plex data set using MS3 reporter ion intensity quantitative values in a Jupyter notebook with an R kernel. The learning module begins with the raw intensities, performs normalization, and differential abundance analysis using limma models, and is designed for researchers with a basic understanding of mass spectrometry and R programming language. Learners walk away with a better understanding of how to navigate Google Cloud Platform for proteomic research, and with the basics of mass spectrometry data analysis at the command line.
}, } @article {pmid39041913, year = {2024}, author = {Qin, Y and Maggio, A and Hawkins, D and Beaudry, L and Kim, A and Pan, D and Gong, T and Fu, Y and Yang, H and Deng, Y}, title = {Whole-genome bisulfite sequencing data analysis learning module on Google Cloud Platform.}, journal = {Briefings in bioinformatics}, volume = {25}, number = {Supplement_1}, pages = {}, pmid = {39041913}, issn = {1477-4054}, support = {P20 GM103466/GM/NIGMS NIH HHS/United States ; U54 HG013243/HG/NHGRI NIH HHS/United States ; U54 MD007601/MD/NIMHD NIH HHS/United States ; P20GM103466/NH/NIH HHS/United States ; }, mesh = {*Cloud Computing ; *DNA Methylation ; *Whole Genome Sequencing/methods ; *Software ; Sulfites/chemistry ; Humans ; Epigenesis, Genetic ; Computational Biology/methods ; }, abstract = {This study describes the development of a resource module that is part of a learning platform named 'NIGMS Sandbox for Cloud-based Learning' https://github.com/NIGMS/NIGMS-Sandbox. The overall genesis of the Sandbox is described in the editorial NIGMS Sandbox at the beginning of this Supplement. This module is designed to facilitate interactive learning of whole-genome bisulfite sequencing (WGBS) data analysis utilizing cloud-based tools in Google Cloud Platform, such as Cloud Storage, Vertex AI notebooks and Google Batch. WGBS is a powerful technique that can provide comprehensive insights into DNA methylation patterns at single cytosine resolution, essential for understanding epigenetic regulation across the genome. The designed learning module first provides step-by-step tutorials that guide learners through two main stages of WGBS data analysis, preprocessing and the identification of differentially methylated regions. It then provides a streamlined workflow and demonstrates how to effectively use it for large datasets given the power of cloud infrastructure. The integration of these interconnected submodules progressively deepens the user's understanding of the WGBS analysis process along with the use of cloud resources. Through this module, we can enhance the accessibility and adoption of cloud computing in epigenomic research, speeding up the advancements in the related field and beyond.
}, } @article {pmid39041912, year = {2024}, author = {Hemme, CL and Beaudry, L and Yosufzai, Z and Kim, A and Pan, D and Campbell, R and Price, M and Cho, BP}, title = {A cloud-based learning module for biomarker discovery.}, journal = {Briefings in bioinformatics}, volume = {25}, number = {Supplement_1}, pages = {}, pmid = {39041912}, issn = {1477-4054}, support = {P20 GM103430/GM/NIGMS NIH HHS/United States ; P20GM103430/NH/NIH HHS/United States ; }, mesh = {*Cloud Computing ; *Biomarkers/metabolism ; Animals ; Software ; Humans ; Rats ; Machine Learning ; Computational Biology/methods ; }, abstract = {This manuscript describes the development of a resource module that is part of a learning platform named "NIGMS Sandbox for Cloud-based Learning" https://github.com/NIGMS/NIGMS-Sandbox. The overall genesis of the Sandbox is described in the editorial NIGMS Sandbox at the beginning of this Supplement. This module delivers learning materials on basic principles in biomarker discovery in an interactive format that uses appropriate cloud resources for data access and analyses. In collaboration with Google Cloud, Deloitte Consulting and NIGMS, the Rhode Island INBRE Molecular Informatics Core developed a cloud-based training module for biomarker discovery. The module consists of nine submodules covering various topics on biomarker discovery and assessment and is deployed on the Google Cloud Platform and available for public use through the NIGMS Sandbox. The submodules are written as a series of Jupyter Notebooks utilizing R and Bioconductor for biomarker and omics data analysis. The submodules cover the following topics: 1) introduction to biomarkers; 2) introduction to R data structures; 3) introduction to linear models; 4) introduction to exploratory analysis; 5) rat renal ischemia-reperfusion injury case study; 6) linear and logistic regression for comparison of quantitative biomarkers; 7) exploratory analysis of proteomics IRI data; 8) identification of IRI biomarkers from proteomic data; and 9) machine learning methods for biomarker discovery. Each notebook includes an in-line quiz for self-assessment on the submodule topic, and an overview video is available on YouTube (https://www.youtube.com/watch?v=2-Q9Ax8EW84).
}, } @article {pmid39041911, year = {2024}, author = {Wilkins, OM and Campbell, R and Yosufzai, Z and Doe, V and Soucy, SM}, title = {Cloud-based introduction to BASH programming for biologists.}, journal = {Briefings in bioinformatics}, volume = {25}, number = {Supplement_1}, pages = {}, pmid = {39041911}, issn = {1477-4054}, support = {P20 GM103506/GM/NIGMS NIH HHS/United States ; P20GM130454//National Institutes of General Medical Science/ ; }, mesh = {*Cloud Computing ; *Software ; *Computational Biology/methods ; Programming Languages ; High-Throughput Nucleotide Sequencing/methods ; Genomics/methods ; Humans ; }, abstract = {This manuscript describes the development of a resource module that is part of a learning platform named 'NIGMS Sandbox for Cloud-based Learning', https://github.com/NIGMS/NIGMS-Sandbox. The overall genesis of the Sandbox is described in the editorial authored by National Institute of General Medical Sciences: NIGMS Sandbox: A Learning Platform toward Democratizing Cloud Computing for Biomedical Research at the beginning of this supplement. This module delivers learning materials introducing the utility of the BASH (Bourne Again Shell) programming language for genomic data analysis in an interactive format that uses appropriate cloud resources for data access and analyses. The next-generation sequencing revolution has generated massive amounts of novel biological data from a multitude of platforms that survey an ever-growing list of genomic modalities. These data require significant downstream computational and statistical analyses to glean meaningful biological insights. However, the skill sets required to generate these data are vastly different from the skills required to analyze these data. Bench scientists that generate next-generation data often lack the training required to perform analysis of these datasets and require support from bioinformatics specialists. Dedicated computational training is required to empower biologists in the area of genomic data analysis; however, learning to efficiently leverage a command line interface is a significant barrier in learning how to leverage common analytical tools. Cloud platforms have the potential to democratize access to the technical tools and computational resources necessary to work with modern sequencing data, providing an effective framework for bioinformatics education. This module aims to provide an interactive platform that slowly builds technical skills and knowledge needed to interact with genomics data on the command line in the Cloud. The sandbox format of this module enables users to move through the material at their own pace and test their grasp of the material with knowledge self-checks before building on that material in the next sub-module.
}, } @article {pmid39041910, year = {2024}, author = {Veerappa, AM and Rowley, MJ and Maggio, A and Beaudry, L and Hawkins, D and Kim, A and Sethi, S and Sorgen, PL and Guda, C}, title = {CloudATAC: a cloud-based framework for ATAC-Seq data analysis.}, journal = {Briefings in bioinformatics}, volume = {25}, number = {Supplement_1}, pages = {}, pmid = {39041910}, issn = {1477-4054}, support = {P20 GM103427/GM/NIGMS NIH HHS/United States ; NIH/NIGMS P20 GM103427//NOSI supplement to the parent IDeA Networks of Biomedical Research Excellence (INBRE) Program/ ; }, mesh = {*Cloud Computing ; *Software ; *High-Throughput Nucleotide Sequencing/methods ; Humans ; Computational Biology/methods ; Chromatin Immunoprecipitation Sequencing/methods ; Single-Cell Analysis/methods ; Chromatin/genetics/metabolism ; }, abstract = {Assay for transposase-accessible chromatin with high-throughput sequencing (ATAC-seq) generates genome-wide chromatin accessibility profiles, providing valuable insights into epigenetic gene regulation at both pooled-cell and single-cell population levels. Comprehensive analysis of ATAC-seq data involves the use of various interdependent programs. Learning the correct sequence of steps needed to process the data can represent a major hurdle. Selecting appropriate parameters at each stage, including pre-analysis, core analysis, and advanced downstream analysis, is important to ensure accurate analysis and interpretation of ATAC-seq data. Additionally, obtaining and working within a limited computational environment presents a significant challenge to non-bioinformatic researchers. Therefore, we present CloudATAC, an open-source, cloud-based interactive framework with a scalable, flexible, and streamlined analysis pipeline based on the best practices approach for pooled-cell and single-cell ATAC-seq data. The framework uses on-demand computational power and memory, scalability, and a secure and compliant environment provided by Google Cloud. Additionally, we leverage Jupyter Notebook's interactive computing platform that combines live code, tutorials, narrative text, flashcards, quizzes, and custom visualizations to enhance learning and analysis. Further, leveraging GPU instances has significantly improved the run-time of the single-cell framework. The source codes and data are publicly available through NIH Cloud lab https://github.com/NIGMS/ATAC-Seq-and-Single-Cell-ATAC-Seq-Analysis. This manuscript describes the development of a resource module that is part of a learning platform named ``NIGMS Sandbox for Cloud-based Learning'' https://github.com/NIGMS/NIGMS-Sandbox. The overall genesis of the Sandbox is described in the editorial NIGMS Sandbox [1] at the beginning of this Supplement. 
This module delivers learning materials on the analysis of bulk and single-cell ATAC-seq data in an interactive format that uses appropriate cloud resources for data access and analyses.}, } @article {pmid39040324, year = {2024}, author = {Almalawi, A and Zafar, A and Unhelkar, B and Hassan, S and Alqurashi, F and Khan, AI and Fahad, A and Alam, MM}, title = {Enhancing security in smart healthcare systems: Using intelligent edge computing with a novel Salp Swarm Optimization and radial basis neural network algorithm.}, journal = {Heliyon}, volume = {10}, number = {13}, pages = {e33792}, pmid = {39040324}, issn = {2405-8440}, abstract = {A smart healthcare system (SHS) is a health service system that employs advanced technologies such as wearable devices, the Internet of Things (IoT), and mobile internet to dynamically access information and connect people and institutions related to healthcare, thereby actively managing and responding to medical ecosystem needs. Edge computing (EC) plays a significant role in SHS as it enables real-time data processing and analysis at the data source, which reduces latency and improves medical intervention speed. However, the integration of patient information, including electronic health records (EHRs), into the SHS framework induces security and privacy concerns. To address these issues, an intelligent EC framework was proposed in this study. The objective of this study is to accurately identify security threats and ensure secure data transmission in the SHS environment. The proposed EC framework leverages the effectiveness of Salp Swarm Optimization and Radial Basis Functional Neural Network (SS-RBFN) for enhancing security and data privacy. The proposed methodology commences with the collection of healthcare information, which is then pre-processed to ensure the consistency and quality of the database for further analysis. Subsequently, the SS-RBFN algorithm was trained using the pre-processed database to distinguish between normal and malicious data streams accurately, offering continuous monitoring in the SHS environment. Additionally, a Rivest-Shamir-Adleman (RSA) approach was applied to safeguard data against security threats during transmission to cloud storage. The proposed model was trained and validated using the IoT-based healthcare database available at Kaggle, and the experimental results demonstrated that it achieved 99.87% accuracy, 99.76% precision, 99.49% F-measure, 98.99% recall, 97.37% throughput, and 1.2 s latency. Furthermore, the results achieved by the proposed model were compared with the existing models to validate its effectiveness in enhancing security.}, } @article {pmid39038028, year = {2024}, author = {Pulido-Gaytan, B and Tchernykh, A}, title = {Self-learning activation functions to increase accuracy of privacy-preserving Convolutional Neural Networks with homomorphic encryption.}, journal = {PloS one}, volume = {19}, number = {7}, pages = {e0306420}, pmid = {39038028}, issn = {1932-6203}, mesh = {*Neural Networks, Computer ; *Computer Security ; *Privacy ; Humans ; Algorithms ; Cloud Computing ; }, abstract = {The widespread adoption of cloud computing necessitates privacy-preserving techniques that allow information to be processed without disclosure. This paper proposes a method to increase the accuracy and performance of privacy-preserving Convolutional Neural Networks with Homomorphic Encryption (CNN-HE) by Self-Learning Activation Functions (SLAF).
SLAFs are polynomials with trainable coefficients that are updated during training, together with the synaptic weights, independently for each polynomial, to learn task-specific and CNN-specific features. We theoretically prove the feasibility of approximating any continuous activation function to the desired error as a function of the SLAF degree. Two CNN-HE models are proposed: CNN-HE-SLAF and CNN-HE-SLAF-R. In the first model, all activation functions are replaced by SLAFs, and the CNN is trained to find weights and coefficients. In the second one, the CNN is trained with the original activation, then the weights are fixed, the activation is substituted by a SLAF, and the CNN is briefly re-trained to adapt the SLAF coefficients. We show that such self-learning can achieve the same accuracy (99.38%) as a non-polynomial ReLU over non-homomorphic CNNs and lead to an increase in accuracy (99.21%) and higher performance (6.26 times faster) than the state-of-the-art CNN-HE CryptoNets on the MNIST optical character recognition benchmark dataset.}, } @article {pmid39028603, year = {2024}, author = {Luo, W and Huang, K and Liang, X and Ren, H and Zhou, N and Zhang, C and Yang, C and Gui, W}, title = {Process Manufacturing Intelligence Empowered by Industrial Metaverse: A Survey.}, journal = {IEEE transactions on cybernetics}, volume = {54}, number = {11}, pages = {6679-6692}, doi = {10.1109/TCYB.2024.3420958}, pmid = {39028603}, issn = {2168-2275}, abstract = {The intelligent goal of process manufacturing is to achieve high efficiency and greening of the entire production process. However, the information systems it uses are functionally independent, resulting in knowledge gaps between levels. Decision-making still requires large amounts of manual work by knowledge workers. The industrial metaverse is a necessary means to bridge these knowledge gaps through sharing and collaborative decision-making. Considering the safety and stability requirements of process manufacturing, this article conducts a thorough survey of process manufacturing intelligence empowered by the industrial metaverse. First, it analyzes the current status and challenges of process manufacturing intelligence, and then summarizes the latest developments in key enabling technologies of the industrial metaverse, such as interconnection technologies, artificial intelligence, cloud-edge computing, digital twin (DT), immersive interaction, and blockchain technology. On this basis, taking into account the characteristics of process manufacturing, a construction approach and architecture for the process industrial metaverse is proposed: a virtual-real fused industrial metaverse construction method that combines DTs with physical avatars, which can effectively ensure the safety of the metaverse's application in industrial scenarios.
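A minimal PyTorch sketch of the trainable-polynomial idea behind the SLAFs described above; the degree, initialization, and surrounding network are illustrative assumptions, and homomorphic-encryption specifics (ciphertext packing, CryptoNets-style inference) are omitted:

import torch
import torch.nn as nn

class SLAF(nn.Module):
    """Self-learning activation: y = sum_k c_k * x**k with trainable c_k."""
    def __init__(self, degree: int = 2):
        super().__init__()
        self.coeffs = nn.Parameter(0.1 * torch.randn(degree + 1))

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        y = torch.zeros_like(x)
        for k, c in enumerate(self.coeffs):  # coefficients train with the weights
            y = y + c * x ** k
        return y

# HE-friendly CNN with polynomial activations; sizes assume 28x28 MNIST inputs
net = nn.Sequential(
    nn.Conv2d(1, 8, kernel_size=3),  # -> 8 x 26 x 26
    SLAF(degree=2),                  # low degree keeps HE evaluation cheap
    nn.Flatten(),
    nn.Linear(8 * 26 * 26, 10),
)
print(net(torch.randn(1, 1, 28, 28)).shape)  # torch.Size([1, 10])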
Finally, we conducted preliminary exploration and research to demonstrate the feasibility of the proposed method.}, } @article {pmid39024163, year = {2024}, author = {McCoy, ES and Park, SK and Patel, RP and Ryan, DF and Mullen, ZJ and Nesbitt, JJ and Lopez, JE and Taylor-Blake, B and Vanden, KA and Krantz, JL and Hu, W and Garris, RL and Snyder, MG and Lima, LV and Sotocinal, SG and Austin, JS and Kashlan, AD and Shah, S and Trocinski, AK and Pudipeddi, SS and Major, RM and Bazick, HO and Klein, MR and Mogil, JS and Wu, G and Zylka, MJ}, title = {Development of PainFace software to simplify, standardize, and scale up mouse grimace analyses.}, journal = {Pain}, volume = {165}, number = {8}, pages = {1793-1805}, pmid = {39024163}, issn = {1872-6623}, support = {R01 NS114259/NS/NINDS NIH HHS/United States ; }, mesh = {Animals ; Mice ; *Facial Expression ; Female ; *Software/standards ; *Mice, Inbred C57BL ; *Pain Measurement/methods/standards ; Male ; Pain/diagnosis ; }, abstract = {Facial grimacing is used to quantify spontaneous pain in mice and other mammals, but scoring relies on humans with different levels of proficiency. Here, we developed a cloud-based software platform called PainFace (http://painface.net) that uses machine learning to detect 4 facial action units of the mouse grimace scale (orbitals, nose, ears, whiskers) and score facial grimaces of black-coated C57BL/6 male and female mice on a 0 to 8 scale. Platform accuracy was validated in 2 different laboratories, with 3 conditions that evoke grimacing: laparotomy surgery, bilateral hindpaw injection of carrageenan, and intraplantar injection of formalin. PainFace can generate up to 1 grimace score per second from a standard 30 frames/s video, making it possible to quantify facial grimacing over time, and operates at a speed that scales with computing power. By analyzing the frequency distribution of grimace scores, we found that mice spent 7x more time in a "high grimace" state following laparotomy surgery relative to sham surgery controls. Our study shows that PainFace reproducibly quantifies facial grimaces indicative of nonevoked spontaneous pain and enables laboratories to standardize and scale up facial grimace analyses.}, } @article {pmid39022436, year = {2024}, author = {Malakhov, KS}, title = {Innovative Hybrid Cloud Solutions for Physical Medicine and Telerehabilitation Research.}, journal = {International journal of telerehabilitation}, volume = {16}, number = {1}, pages = {e6635}, pmid = {39022436}, issn = {1945-2020}, abstract = {PURPOSE: The primary objective of this study was to develop and implement a Hybrid Cloud Environment for Telerehabilitation (HCET) to enhance patient care and research in the Physical Medicine and Rehabilitation (PM&R) domain. This environment aims to integrate advanced information and communication technologies to support both traditional in-person therapy and digital health solutions.
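Given PainFace's up-to-one-score-per-second output on a 0 to 8 scale, the "high grimace" dwell-time comparison above amounts to thresholding the score series; the threshold and the synthetic score vectors below are assumptions for illustration:

import numpy as np

def high_grimace_fraction(scores, threshold: int = 6) -> float:
    """Fraction of scored frames at or above the threshold."""
    scores = np.asarray(scores)
    return float((scores >= threshold).mean())

post_op = np.random.randint(0, 9, 1800)  # stand-in for 30 min of per-second scores
sham = np.random.randint(0, 9, 1800)
print(high_grimace_fraction(post_op), high_grimace_fraction(sham))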

BACKGROUND: Telerehabilitation is emerging as a core component of modern healthcare, especially within the PM&R field. By applying digital health technologies, telerehabilitation provides continuous, comprehensive support for patient rehabilitation, bridging the gap between traditional therapy and remote healthcare delivery. This study focuses on the design and implementation of a hybrid HCET system tailored for the PM&R domain.

METHODS: The study involved the development of a comprehensive architectural and structural organization for the HCET, including a three-layer model (infrastructure, platform, service layers). Core components of the HCET were designed and implemented, such as the Hospital Information System (HIS) for PM&R, the MedRehabBot system, and the MedLocalGPT project. These components were integrated using advanced technologies like large language models (LLMs), word embeddings, and ontology-related approaches, along with APIs for enhanced functionality and interaction.
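As a purely hypothetical illustration of how a service-layer component such as MedLocalGPT might be queried through an API, consider the sketch below; the endpoint, port, and payload shape are invented and do not reflect the project's documented interface:

import requests

def query_rehab_assistant(question: str, lang: str = 'en') -> str:
    """POST a domain question to a hypothetical MedLocalGPT-style endpoint."""
    resp = requests.post(
        'http://localhost:8080/api/query',  # hypothetical service-layer URL
        json={'prompt': question, 'language': lang},
        timeout=30,
    )
    resp.raise_for_status()
    return resp.json().get('answer', '')

print(query_rehab_assistant('Suggest goal-tracking metrics for gait rehabilitation'))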

FINDINGS: The HCET system was successfully implemented and is operational, providing a robust platform for telerehabilitation. Key features include the MVP of the HIS for PM&R, supporting patient profile management and rehabilitation goal tracking; the MedRehabBot and WhiteBookBot systems; and the MedLocalGPT project, which offers sophisticated querying capabilities and access to extensive domain-specific knowledge. The system supports both the Ukrainian and English languages, ensuring broad accessibility and usability.

INTERPRETATION: The practical implementation and operation of the HCET system demonstrate its potential to transform telerehabilitation within the PM&R domain. By integrating advanced technologies and providing comprehensive digital health solutions, the HCET enhances patient care, supports ongoing rehabilitation, and facilitates advanced research. Future work will focus on optimizing services and expanding language support to further improve the system's functionality and impact.}, } @article {pmid39016361, year = {2024}, author = {Idalino, FD and Rosa, KKD and Hillebrand, FL and Arigony-Neto, J and Mendes, CW and Simões, JC}, title = {Variability in wet and dry snow radar zones in the North of the Antarctic Peninsula using a cloud computing environment.}, journal = {Anais da Academia Brasileira de Ciencias}, volume = {96}, number = {suppl 2}, pages = {e20230704}, doi = {10.1590/0001-3765202420230704}, pmid = {39016361}, issn = {1678-2690}, mesh = {Antarctic Regions ; *Snow ; *Radar ; *Cloud Computing ; Seasons ; Environmental Monitoring/methods ; Temperature ; }, abstract = {This work investigated the annual variations in dry snow (DSRZ) and wet snow radar zones (WSRZ) in the north of the Antarctic Peninsula between 2015 and 2023. A specific code for snow zone detection on Sentinel-1 images was created on Google Earth Engine by combining the CryoSat-2 digital elevation model and air temperature data from ERA5. Regions with backscatter coefficient (σ⁰) values exceeding -6.5 dB were considered the extent of surface melt occurrence, and the dry snow line was considered to coincide with the -11 °C isotherm of the average annual air temperature. The annual variation in WSRZ exhibited moderate correlations with annual average air temperature, total precipitation, and the sum of annual degree-days. However, statistical tests indicated low determination coefficients and no significant trend values in DSRZ behavior with atmospheric variables. The reduction in DSRZ area for 2019/2020 and 2020/2021 compared to 2018/2018 indicated an upward shift of the dry snow line in this AP region. The methodology demonstrated its efficacy for both quantitative and qualitative analyses of data obtained in digital processing environments, allowing for the monitoring of large-scale spatial and temporal variations and for understanding changes in glacier mass loss.}, } @article {pmid39008420, year = {2024}, author = {Lee, G and Connor, CW}, title = {"Alexa, Cycle The Blood Pressure": A Voice Control Interface Method for Anesthesia Monitoring.}, journal = {Anesthesia and analgesia}, volume = {139}, number = {3}, pages = {639-646}, pmid = {39008420}, issn = {1526-7598}, support = {R01 GM121457/GM/NIGMS NIH HHS/United States ; R35 GM145319/GM/NIGMS NIH HHS/United States ; }, mesh = {Humans ; *Anesthesiologists ; *Monitoring, Intraoperative/instrumentation/methods ; Male ; Equipment Design ; Voice ; Speech Recognition Software ; Female ; Anesthesiology/instrumentation ; Middle Aged ; Blood Pressure ; Anesthesia ; Blood Pressure Determination/instrumentation/methods ; Adult ; }, abstract = {BACKGROUND: Anesthesia monitors and devices are usually controlled with some combination of dials, keypads, a keyboard, or a touch screen. Thus, anesthesiologists can operate their monitors only when they are physically close to them and not otherwise task-loaded with sterile procedures such as line or block placement.
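The melt-extent rule in the Antarctic snow-zone study above is a single backscatter threshold, which makes for a very small Earth Engine sketch; the scene selection and the 'HH' polarization band are illustrative assumptions:

import ee

ee.Initialize()

scene = (ee.ImageCollection('COPERNICUS/S1_GRD')
         .filter(ee.Filter.listContains('transmitterReceiverPolarisation', 'HH'))
         .filterBounds(ee.Geometry.Point(-58.0, -63.5))  # northern Antarctic Peninsula
         .filterDate('2020-01-01', '2020-01-31')
         .first())

melt_mask = scene.select('HH').gt(-6.5)  # sigma0 > -6.5 dB flags wet snow / surface melt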
Voice recognition technology has become commonplace and may offer advantages in anesthesia practice, such as reducing surface contamination rates and allowing anesthesiologists to effect changes in monitoring and therapy when they would otherwise be unable to do so. We hypothesized that this technology is practicable and that anesthesiologists would consider it useful.
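As a concrete illustration of the Sentinel-1 snow-zone study above, a minimal Google Earth Engine sketch of its backscatter-threshold step follows. The -6.5 dB wet-snow criterion comes from that abstract; the collection, HH band, date window, and area of interest are illustrative assumptions, not the study's exact configuration.

```python
# Minimal sketch, assuming the Earth Engine Python API and a placeholder AOI.
import ee

ee.Initialize()

# Approximate northern Antarctic Peninsula bounding box (illustrative only).
aoi = ee.Geometry.Rectangle([-64.0, -66.0, -58.0, -63.0])

s1 = (ee.ImageCollection('COPERNICUS/S1_GRD')
      .filterBounds(aoi)
      .filterDate('2020-01-01', '2020-02-28')   # an austral melt-season window
      .filter(ee.Filter.listContains('transmitterReceiverPolarisation', 'HH'))
      .select('HH'))

sigma0 = s1.mean()            # mean backscatter (dB) over the window
wet_snow = sigma0.gt(-6.5)    # 1 where surface melt is inferred, per the study
```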

METHODS: A novel voice-driven prototype controller was designed for the GE Solar 8000M anesthesia patient monitor. The apparatus was implemented using a Raspberry Pi 4 single-board computer, an external conference audio device, the Google Cloud Speech-to-Text platform, and a modified Solar controller to effect commands. Fifty anesthesia providers tested the prototype. Evaluations and surveys were completed in a nonclinical environment to avoid any ethical or safety concerns regarding the use of the device in direct patient care. All anesthesiologists sampled were fluent English speakers, many with inflections from their first language or national origin, reflecting diversity in the population of practicing anesthesiologists.
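A minimal sketch of the kind of pipeline these methods describe: transcribe captured audio with Google Cloud Speech-to-Text and map recognized phrases to controller actions. The google-cloud-speech calls follow the library's documented v1 usage; the command table and opcodes are hypothetical stand-ins for the modified Solar controller interface.

```python
# Sketch: phrase-to-command dispatch for a voice-driven monitor controller.
from google.cloud import speech

# Hypothetical command set; the opcodes are invented for illustration.
COMMANDS = {
    "cycle the blood pressure": "NIBP_START",
    "silence alarms": "ALARM_SILENCE",
}

def transcribe(wav_bytes: bytes) -> str:
    client = speech.SpeechClient()
    config = speech.RecognitionConfig(
        encoding=speech.RecognitionConfig.AudioEncoding.LINEAR16,
        sample_rate_hertz=16000,
        language_code="en-US",
    )
    audio = speech.RecognitionAudio(content=wav_bytes)
    response = client.recognize(config=config, audio=audio)
    return " ".join(r.alternatives[0].transcript for r in response.results).lower()

def dispatch(transcript: str):
    for phrase, opcode in COMMANDS.items():
        if phrase in transcript:
            return opcode   # would be forwarded to the monitor hardware
    return None             # unrecognized utterances are ignored
```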

RESULTS: The prototype was uniformly well-received by anesthesiologists. Ease-of-use, usefulness, and effectiveness were assessed on a Likert scale with means of 9.96, 7.22, and 8.48 of 10, respectively. No population cofactors were associated with these results. Advancing level of training (eg, nonattending versus attending) was not correlated with any preference. Accent of country or region was not correlated with any preference. Vocal pitch register did not correlate with any preference. Statistical analyses were performed with analysis of variance and the unpaired t-test.
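For readers unfamiliar with the statistics reported here, a toy sketch of an unpaired t-test on Likert ratings follows; the scores are fabricated stand-ins, not the study's data.

```python
# Toy comparison of Likert ratings between two provider groups (invented data).
from scipy import stats

attending    = [9, 10, 8, 9, 10, 7, 9]
nonattending = [10, 9, 9, 8, 10, 9, 8]

t, p = stats.ttest_ind(attending, nonattending)   # standard unpaired t-test
print(f"t = {t:.2f}, p = {p:.3f}")  # a large p would mirror the reported null result
```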

CONCLUSIONS: The use of voice recognition to control operating room monitors was well-received by anesthesia providers. Additional commands are easily implemented on the prototype controller. No adverse relationship was found between acceptability and level of anesthesia experience, pitch of voice, or presence of accent. Voice recognition is a promising method of controlling anesthesia monitors and devices that could potentially increase usability and situational awareness in circumstances where the anesthesiologist is otherwise out-of-position or task-loaded.}, } @article {pmid39007702, year = {2024}, author = {Hsu, WT and Shirts, MR}, title = {Replica Exchange of Expanded Ensembles: A Generalized Ensemble Approach with Enhanced Flexibility and Parallelizability.}, journal = {Journal of chemical theory and computation}, volume = {20}, number = {14}, pages = {6062-6081}, pmid = {39007702}, issn = {1549-9626}, support = {R01 GM123296/GM/NIGMS NIH HHS/United States ; }, abstract = {Generalized ensemble methods such as Hamiltonian replica exchange (HREX) and expanded ensemble (EE) have been shown effective in free energy calculations for various contexts, given their ability to circumvent free energy barriers via nonphysical pathways defined by states with different modified Hamiltonians. However, both HREX and EE methods come with drawbacks, such as limited flexibility in parameter specification or the lack of parallelizability for more complicated applications. To address this challenge, we present the method of replica exchange of expanded ensembles (REXEE), which integrates the principles of HREX and EE methods by periodically exchanging coordinates of EE replicas sampling different yet overlapping sets of alchemical states. With the solvation free energy calculation of anthracene and binding free energy calculation of the CB7-10 binding complex, we show that the REXEE method achieves the same level of accuracy in free energy calculations as the HREX and EE methods, while offering enhanced flexibility and parallelizability. Additionally, we examined REXEE simulations with various setups to understand how different exchange frequencies and replica configurations influence the sampling efficiency in the fixed-weight phase and the weight convergence in the weight-updating phase. The REXEE approach can be further extended to support asynchronous parallelization schemes, allowing looser communications between larger numbers of loosely coupled processors, such as in cloud computing, and therefore promising much more scalable and adaptive executions of alchemical free energy calculations. All algorithms for the REXEE method are available in the Python package ensemble_md, which offers an interface for REXEE simulation management without modifying the source code in GROMACS.}, } @article {pmid39003319, year = {2024}, author = {Nyangaresi, VO and Abduljabbar, ZA and Mutlaq, KA and Bulbul, SS and Ma, J and Aldarwish, AJY and Honi, DG and Al Sibahee, MA and Neamah, HA}, title = {Smart city energy efficient data privacy preservation protocol based on biometrics and fuzzy commitment scheme.}, journal = {Scientific reports}, volume = {14}, number = {1}, pages = {16223}, pmid = {39003319}, issn = {2045-2322}, support = {GDRC202132//Natural Science Foundation of Top Talent of SZTU/ ; }, abstract = {Advancements in cloud computing, flying ad-hoc networks, wireless sensor networks, artificial intelligence, big data, 5th generation mobile network and internet of things have led to the development of smart cities.
Owing to their massive interconnectedness, high volumes of data are collected and exchanged over the public internet. Therefore, the exchanged messages are susceptible to numerous security and privacy threats across these open public channels. Although many security techniques have been designed to address this issue, most of them are still vulnerable to attacks, while some deploy computationally extensive cryptographic operations such as bilinear pairings and blockchain. In this paper, we leverage biometrics, error correction codes, and fuzzy commitment schemes to develop a secure and energy-efficient authentication scheme for smart cities. This is informed by the fact that biometric data is cumbersome to reproduce, and hence attacks such as side-channeling are thwarted. We formally analyze the security of our protocol using the Burrows-Abadi-Needham (BAN) logic, which shows that our scheme achieves strong mutual authentication among the communicating entities. The semantic analysis of our protocol shows that it mitigates attacks such as de-synchronization, eavesdropping, session hijacking, forgery and side-channeling. In addition, its formal security analysis demonstrates that it is secure under the Canetti and Krawczyk attack model. In terms of performance, our scheme is shown to reduce the computation overheads by 20.7% and hence is the most efficient among the state-of-the-art protocols.}, } @article {pmid39001087, year = {2024}, author = {Alwakeel, AM and Alnaim, AK}, title = {Trust Management and Resource Optimization in Edge and Fog Computing Using the CyberGuard Framework.}, journal = {Sensors (Basel, Switzerland)}, volume = {24}, number = {13}, pages = {}, pmid = {39001087}, issn = {1424-8220}, support = {XXXXXX//King Faisal University/ ; }, abstract = {The growing importance of edge and fog computing in the modern IT infrastructure is driven by the rise of decentralized applications. However, resource allocation within these frameworks is challenging due to varying device capabilities and dynamic network conditions. Conventional approaches often result in poor resource use and slowed advancements. This study presents a novel strategy for enhancing resource allocation in edge and fog computing by integrating machine learning with the blockchain for reliable trust management. Our proposed framework, called CyberGuard, leverages the blockchain's inherent immutability and decentralization to establish a trustworthy and transparent network for monitoring and verifying edge and fog computing transactions. CyberGuard combines the Trust2Vec model with conventional machine-learning models like SVM, KNN, and random forests, creating a robust mechanism for assessing trust and security risks. Through detailed optimization and case studies, CyberGuard demonstrates significant improvements in resource allocation efficiency and overall system performance in real-world scenarios.
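Returning to the biometric authentication scheme above: a toy, Juels-Wattenberg-style fuzzy commitment sketch illustrates the primitive it builds on, with a simple repetition code standing in for the paper's error-correcting code. All sizes and the one-bit noise model are illustrative.

```python
# Toy fuzzy commitment: bind a random key to noisy biometric bits.
import hashlib
import secrets

def repeat_encode(bits, r=5):
    return [b for b in bits for _ in range(r)]          # trivial ECC stand-in

def repeat_decode(bits, r=5):
    return [int(sum(bits[i*r:(i+1)*r]) > r // 2) for i in range(len(bits) // r)]

def commit(biometric, key, r=5):
    codeword = repeat_encode(key, r)
    helper = [c ^ b for c, b in zip(codeword, biometric)]  # hides the codeword
    return hashlib.sha256(bytes(key)).hexdigest(), helper

def open_commitment(biometric, helper, digest, r=5):
    codeword = [h ^ b for h, b in zip(helper, biometric)]
    key = repeat_decode(codeword, r)                       # corrects small noise
    return key if hashlib.sha256(bytes(key)).hexdigest() == digest else None

key = [secrets.randbits(1) for _ in range(16)]
bio = [secrets.randbits(1) for _ in range(80)]   # enrollment measurement
digest, helper = commit(bio, key)
noisy = bio[:]
noisy[3] ^= 1                                    # one flipped bit at verification
assert open_commitment(noisy, helper, digest) == key
```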
Our results highlight CyberGuard's effectiveness, evidenced by accuracy, precision, recall, and F1-score each reaching a remarkable 98.18%, showcasing the transformative potential of our comprehensive approach in edge and fog computing environments.}, } @article {pmid39001032, year = {2024}, author = {Alwakeel, AM and Alnaim, AK}, title = {Network Slicing in 6G: A Strategic Framework for IoT in Smart Cities.}, journal = {Sensors (Basel, Switzerland)}, volume = {24}, number = {13}, pages = {}, pmid = {39001032}, issn = {1424-8220}, support = {000000//King Faisal University/ ; }, abstract = {The emergence of 6G communication technologies brings both opportunities and challenges for the Internet of Things (IoT) in smart cities. In this paper, we introduce an advanced network slicing framework designed to meet the complex demands of 6G smart cities' IoT deployments. The framework development follows a detailed methodology that encompasses requirement analysis, metric formulation, constraint specification, objective setting, mathematical modeling, configuration optimization, performance evaluation, parameter tuning, and validation of the final design. Our evaluations demonstrate the framework's high efficiency, evidenced by low round-trip time (RTT), minimal packet loss, increased availability, and enhanced throughput. Notably, the framework scales effectively, managing multiple connections simultaneously without compromising resource efficiency. Enhanced security is achieved through robust features such as 256-bit encryption and a high rate of authentication success. The discussion elaborates on these findings, underscoring the framework's impressive performance, scalability, and security capabilities.}, } @article {pmid39000973, year = {2024}, author = {Shahid, U and Ahmed, G and Siddiqui, S and Shuja, J and Balogun, AO}, title = {Latency-Sensitive Function Placement among Heterogeneous Nodes in Serverless Computing.}, journal = {Sensors (Basel, Switzerland)}, volume = {24}, number = {13}, pages = {}, pmid = {39000973}, issn = {1424-8220}, support = {015LA0-049//Universiti Teknologi Petronas/ ; }, abstract = {Function as a Service (FaaS) is highly beneficial to smart city infrastructure due to its flexibility, efficiency, and adaptability, specifically for integration in the digital landscape. FaaS has a serverless setup, which means that an organization no longer has to worry about specific infrastructure management tasks; the developers can focus on how to deploy and create code efficiently. Since FaaS aligns well with the IoT, it easily integrates with IoT devices, thereby making it possible to perform event-based actions and real-time computations. In our research, we offer an exclusive likelihood-based model of adaptive machine learning for identifying the right placement for a function. We employ the XGBoost regressor to estimate the execution time for each function and utilize the decision tree regressor to predict network latency. By encompassing factors like network delay, arrival computation, and emphasis on resources, the machine learning model eases the selection of a placement. For replication, we use Docker containers, focusing on serverless node type, serverless node variety, function location, deadlines, and edge-cloud topology.
Thus, the primary objectives are to meet deadlines and to improve resource utilization; effective utilization of resources in turn leads to better deadline compliance.}, } @article {pmid39000960, year = {2024}, author = {Liu, X and Dong, X and Jia, N and Zhao, W}, title = {Federated Learning-Oriented Edge Computing Framework for the IIoT.}, journal = {Sensors (Basel, Switzerland)}, volume = {24}, number = {13}, pages = {}, pmid = {39000960}, issn = {1424-8220}, support = {2022YFB3305700//The National Key Research and Development Program of China/ ; }, abstract = {With the maturity of artificial intelligence (AI) technology, applications of AI in edge computing will greatly promote the development of industrial technology. However, the existing studies on the edge computing framework for the Industrial Internet of Things (IIoT) still face several challenges, such as deep hardware and software coupling, diverse protocols, difficult deployment of AI models, insufficient computing capabilities of edge devices, and sensitivity to delay and energy consumption. To solve the above problems, this paper proposes a software-defined AI-oriented three-layer IIoT edge computing framework and presents the design and implementation of an AI-oriented edge computing system, aiming to support device access, enable the acceptance and deployment of AI models from the cloud, and allow the whole process from data acquisition to model training to be completed at the edge. In addition, this paper proposes a time series-based method for device selection and computation offloading in the federated learning process, which selectively offloads the tasks of inefficient nodes to the edge computing center to reduce the training delay and energy consumption. Finally, experiments carried out to verify the feasibility and effectiveness of the proposed method are reported. The model training time with the proposed method is generally 30% to 50% less than that with the random device selection method, and the training energy consumption under the proposed method is generally 35% to 55% less.}, } @article {pmid38998801, year = {2024}, author = {Zuo, G and Wang, R and Wan, C and Zhang, Z and Zhang, S and Yang, W}, title = {Unveiling the Evolution of Virtual Reality in Medicine: A Bibliometric Analysis of Research Hotspots and Trends over the Past 12 Years.}, journal = {Healthcare (Basel, Switzerland)}, volume = {12}, number = {13}, pages = {}, pmid = {38998801}, issn = {2227-9032}, support = {SZSM202311012//Sanming Project of Medicine in Shenzen Municipality/ ; }, abstract = {BACKGROUND: Virtual reality (VR), widely used in the medical field, may affect future medical training and treatment. Therefore, this study examined VR's potential uses and research directions in medicine.
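A compact sketch of the two-regressor placement idea from the serverless function-placement study above: XGBoost estimates per-function execution time, a decision tree predicts network latency, and a candidate node wins if the combined estimate meets the deadline. The features, synthetic data, and pick_node helper are illustrative assumptions.

```python
# Sketch of latency-aware function placement with two learned regressors.
import numpy as np
from sklearn.tree import DecisionTreeRegressor
from xgboost import XGBRegressor

rng = np.random.default_rng(0)
X = rng.random((500, 4))   # e.g. [cpu_share, mem_mb, payload_kb, node_type]
exec_time = X @ [0.5, 0.2, 0.8, 0.1] + rng.normal(0, 0.05, 500)
latency   = X @ [0.1, 0.0, 0.6, 0.9] + rng.normal(0, 0.05, 500)

exec_model = XGBRegressor(n_estimators=200).fit(X, exec_time)
lat_model  = DecisionTreeRegressor(max_depth=6).fit(X, latency)

def pick_node(candidates, deadline):
    """Return the index of the candidate node most likely to meet the deadline."""
    totals = [exec_model.predict(c.reshape(1, -1))[0]
              + lat_model.predict(c.reshape(1, -1))[0] for c in candidates]
    best = int(np.argmin(totals))
    return best if totals[best] <= deadline else None

node = pick_node([X[0], X[1], X[2]], deadline=1.0)
```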

METHODS: Citation data were downloaded from the Web of Science Core Collection database (WoSCC) to evaluate VR in medicine in articles published between 1 January 2012 and 31 December 2023. These data were analyzed using CiteSpace 6.2.R2 software. Present limitations and future opportunities were summarized based on the data.

RESULTS: A total of 2143 related publications from 86 countries and regions were analyzed. The country with the highest number of publications is the USA, with 461 articles. The University of London has the most publications among institutions, with 43 articles. Burst keywords such as "task analysis", "deep learning", and "machine learning" represent the research frontier from 2020 to 2023.

CONCLUSION: The number of publications on VR applications in the medical field has been steadily increasing year by year. The USA is the leading country in this area, while the University of London stands out as the most published and most influential institution. Currently, there is a strong focus on integrating VR and AI to address complex issues such as medical education and training, rehabilitation, and surgical navigation. Looking ahead, the future trend involves integrating VR, augmented reality (AR), and mixed reality (MR) with the Internet of Things (IoT), wireless sensor networks (WSNs), big data analysis (BDA), and cloud computing (CC) technologies to develop intelligent healthcare systems within hospitals or medical centers.}, } @article {pmid38997128, year = {2024}, author = {Allers, S and O'Connell, KA and Carlson, T and Belardo, D and King, BL}, title = {Reusable tutorials for using cloud-based computing environments for the analysis of bacterial gene expression data from bulk RNA sequencing.}, journal = {Briefings in bioinformatics}, volume = {25}, number = {4}, pages = {}, pmid = {38997128}, issn = {1477-4054}, support = {P20 GM103423/GM/NIGMS NIH HHS/United States ; P20GM103423//National Institute of General Medical Sciences of the National Institutes of Health to the Maine INBRE Program/ ; }, mesh = {*Cloud Computing ; *Computational Biology/methods ; *Sequence Analysis, RNA/methods ; *Software ; Gene Expression Regulation, Bacterial ; }, abstract = {This manuscript describes the development of a resource module that is part of a learning platform named "NIGMS Sandbox for Cloud-based Learning" https://github.com/NIGMS/NIGMS-Sandbox. The overall genesis of the Sandbox is described in the editorial NIGMS Sandbox at the beginning of this Supplement. This module delivers learning materials on RNA sequencing (RNAseq) data analysis in an interactive format that uses appropriate cloud resources for data access and analyses. Biomedical research is increasingly data-driven, and dependent upon data management and analysis methods that facilitate rigorous, robust, and reproducible research. Cloud-based computing resources provide opportunities to broaden the application of bioinformatics and data science in research. Two obstacles for researchers, particularly those at small institutions, are: (i) access to bioinformatics analysis environments tailored to their research; and (ii) training in how to use Cloud-based computing resources. We developed five reusable tutorials for bulk RNAseq data analysis to address these obstacles. Using Jupyter notebooks run on the Google Cloud Platform, the tutorials guide the user through a workflow featuring an RNAseq dataset from a study of prophage altered drug resistance in Mycobacterium chelonae. The first tutorial uses a subset of the data so users can learn analysis steps rapidly, and the second uses the entire dataset. Next, a tutorial demonstrates how to analyze the read count data to generate lists of differentially expressed genes using R/DESeq2. Additional tutorials generate read counts using the Snakemake workflow manager and Nextflow with Google Batch.
All tutorials are open-source and can be used as templates for other analyses.}, } @article {pmid38992079, year = {2024}, author = {Kaur, R and Vaithiyanathan, R}, title = {Hybrid YSGOA and neural networks based software failure prediction in cloud systems.}, journal = {Scientific reports}, volume = {14}, number = {1}, pages = {16035}, pmid = {38992079}, issn = {2045-2322}, abstract = {In the realm of cloud computing, ensuring the dependability and robustness of software systems is paramount. The intricate and evolving nature of cloud infrastructures, however, presents substantial obstacles in the pre-emptive identification and rectification of software anomalies. This study introduces an innovative methodology that amalgamates hybrid optimization algorithms with Neural Networks (NN) to refine the prediction of software malfunctions. The core objective is to augment the purity metric of our method across diverse operational conditions. This is accomplished through the utilization of two distinct optimization algorithms: the Yellow Saddle Goat Fish Algorithm (YSGA), which is instrumental in the discernment of pivotal features linked to software failures, and the Grasshopper Optimization Algorithm (GOA), which further polishes the feature compilation. These features are then processed by Neural Networks (NN), capitalizing on their proficiency in deciphering intricate data patterns and interconnections. The NNs are integral to the classification of instances predicated on the ascertained features. Our evaluation, conducted using the Failure-Dataset-OpenStack database and MATLAB Software, demonstrates that the hybrid optimization strategy employed for feature selection significantly curtails complexity and expedites processing.}, } @article {pmid38988330, year = {2024}, author = {Martinez, C and Etxaniz, I and Molinuevo, A and Alonso, J}, title = {MEDINA Catalogue of Cloud Security controls and metrics: Towards Continuous Cloud Security compliance.}, journal = {Open research Europe}, volume = {4}, number = {}, pages = {90}, pmid = {38988330}, issn = {2732-5121}, abstract = {In order to address current challenges on security certification of European ICT products, processes and services, the European Commission, through ENISA (European Union Agency for Cybersecurity), has developed the European Cybersecurity Certification Scheme for Cloud Services (EUCS). This paper presents the overview of the H2020 MEDINA project approach and tools to support the adoption of EUCS and offers a detailed description of one of the core components of the framework, the MEDINA Catalogue of Controls and Metrics. The main objective of the MEDINA Catalogue is to provide automated functionalities for CSPs' compliance managers and auditors to ease the certification process towards EUCS, through the provision of all information and guidance related to the scheme, namely categories, controls, security requirements, assurance levels, etc. The tool has been enhanced with all the research and implementation works performed in MEDINA, such as definition of compliance metrics, suggestion of related implementation guidelines, alignment of similar controls in other schemes, and a set of self-assessment questionnaires, which are presented and discussed in this paper.}, } @article {pmid38983206, year = {2024}, author = {Alsadie, D}, title = {Advancements in heuristic task scheduling for IoT applications in fog-cloud computing: challenges and prospects.}, journal = {PeerJ.
Computer science}, volume = {10}, number = {}, pages = {e2128}, pmid = {38983206}, issn = {2376-5992}, abstract = {Fog computing has emerged as a prospective paradigm to address the computational requirements of IoT applications, extending the capabilities of cloud computing to the network edge. Task scheduling is pivotal in enhancing energy efficiency, optimizing resource utilization and ensuring the timely execution of tasks within fog computing environments. This article presents a comprehensive review of the advancements in task scheduling methodologies for fog computing systems, covering priority-based, greedy heuristics, metaheuristics, learning-based, hybrid heuristics, and nature-inspired heuristic approaches. Through a systematic analysis of relevant literature, we highlight the strengths and limitations of each approach and identify key challenges facing fog computing task scheduling, including dynamic environments, heterogeneity, scalability, resource constraints, security concerns, and algorithm transparency. Furthermore, we propose future research directions to address these challenges, including the integration of machine learning techniques for real-time adaptation, leveraging federated learning for collaborative scheduling, developing resource-aware and energy-efficient algorithms, incorporating security-aware techniques, and advancing explainable AI methodologies. By addressing these challenges and pursuing these research directions, we aim to facilitate the development of more robust, adaptable, and efficient task-scheduling solutions for fog computing environments, ultimately fostering trust, security, and sustainability in fog computing systems and facilitating their widespread adoption across diverse applications and domains.}, } @article {pmid38980280, year = {2024}, author = {Chen, C and Nguyen, DT and Lee, SJ and Baker, NA and Karakoti, AS and Lauw, L and Owen, C and Mueller, KT and Bilodeau, BA and Murugesan, V and Troyer, M}, title = {Accelerating Computational Materials Discovery with Machine Learning and Cloud High-Performance Computing: from Large-Scale Screening to Experimental Validation.}, journal = {Journal of the American Chemical Society}, volume = {146}, number = {29}, pages = {20009-20018}, doi = {10.1021/jacs.4c03849}, pmid = {38980280}, issn = {1520-5126}, abstract = {High-throughput computational materials discovery has promised significant acceleration of the design and discovery of new materials for many years. Despite a surge in interest and activity, the constraints imposed by large-scale computational resources present a significant bottleneck. Furthermore, examples of very large-scale computational discovery carried out through experimental validation remain scarce, especially for materials with product applicability. Here, we demonstrate how this vision became reality by combining state-of-the-art machine learning (ML) models and traditional physics-based models on cloud high-performance computing (HPC) resources to quickly navigate through more than 32 million candidates and predict around half a million potentially stable materials. By focusing on solid-state electrolytes for battery applications, our discovery pipeline further identified 18 promising candidates with new compositions and rediscovered a decade's worth of collective knowledge in the field as a byproduct. 
We then synthesized and experimentally characterized the structures and conductivities of our top candidates, the NaxLi3-xYCl6 (0 ≤ x ≤ 3) series, demonstrating the potential of these compounds to serve as solid electrolytes. Additional candidate materials that are currently under experimental investigation could offer more examples of the computational discovery of new phases of Li- and Na-conducting solid electrolytes. The showcased screening of millions of materials candidates highlights the transformative potential of advanced ML and HPC methodologies, propelling materials discovery into a new era of efficiency and innovation.}, } @article {pmid38975754, year = {2024}, author = {Kumar, A and Verma, G}, title = {Multi-level authentication for security in cloud using improved quantum key distribution.}, journal = {Network (Bristol, England)}, volume = {}, number = {}, pages = {1-21}, doi = {10.1080/0954898X.2024.2367480}, pmid = {38975754}, issn = {1361-6536}, abstract = {Cloud computing is an on-demand, virtualization-based technology for developing, configuring, and modifying applications online through the internet. It enables users to handle various operations such as storage, back-up, and recovery of data, data analysis, delivery of software applications, implementation of new services and applications, hosting of websites and blogs, and streaming of audio and video files. It thereby provides many benefits, although adoption is held back by cloud-security problems such as data leakage, data loss, and cyber attacks. To address these security concerns, researchers have developed a variety of authentication mechanisms; the authentication procedure used in the suggested method is multi-levelled. Accordingly, an improved quantum key distribution (QKD) method is offered to strengthen cloud security against different types of security risks. Key generation for the enhanced QKD is based on the attribute-based encryption (ABE) public key cryptography approach; specifically, ciphertext-policy ABE (CPABE) is used in the improved QKD. The improved QKD scored a reduced KCA attack rating of 0.3193, which is superior to CMMLA (0.7915), CPABE (0.8916), AES (0.5277), Blowfish (0.6144), and ECC (0.4287). Finally, this multi-level authentication using an improved QKD approach is analysed under various measures and validates the enhancement over the state-of-the-art models.}, } @article {pmid38975165, year = {2024}, author = {Yan, L and Wang, G and Feng, H and Liu, P and Gao, H and Zhang, W and Hu, H and Pan, F}, title = {Efficient and accountable anti-leakage attribute-based encryption scheme for cloud storage.}, journal = {Heliyon}, volume = {10}, number = {12}, pages = {e32404}, doi = {10.1016/j.heliyon.2024.e32404}, pmid = {38975165}, issn = {2405-8440}, abstract = {To ensure secure and flexible data sharing in cloud storage, attribute-based encryption (ABE) is introduced to meet the requirements of fine-grained access control and secure one-to-many data sharing. However, the computational burden imposed by attribute encryption renders it unsuitable for resource-constrained environments such as the Internet of Things (IoT) and edge computing. Furthermore, the issue of accountability for illegal keys is crucial, as authorized users may actively disclose or sell authorization keys for personal gain, and keys may also passively leak due to management negligence or hacking incidents. Additionally, since all authorization keys are generated by the attribute authorization center, there is a potential risk of unauthorized key forgery.
In response to these challenges, this paper proposes an efficient and accountable leakage-resistant scheme based on attribute encryption. The scheme adopts more secure online/offline encryption mechanisms and cloud server-assisted decryption to alleviate the computational burden on resource-constrained devices. For illegal keys, the scheme supports accountability for both users and the authorization center, allowing the revocation of decryption privileges for malicious users. In the case of passively leaked keys, timely key updates and revocation of decryption capabilities for leaked keys are implemented. Finally, the paper provides selective security and accountability proofs for the scheme under standard models. Efficiency analysis and experimental results demonstrate that the proposed scheme enhances encryption/decryption efficiency, and the storage overhead for accountability is also extremely low.}, } @article {pmid38965311, year = {2024}, author = {Khazali, M}, title = {Universal terminal for cloud quantum computing.}, journal = {Scientific reports}, volume = {14}, number = {1}, pages = {15412}, pmid = {38965311}, issn = {2045-2322}, abstract = {To bring quantum computing capacities to personal edge devices, the optimum approach is to have simple non-error-corrected personal devices that offload the computational tasks to scalable quantum computers via edge servers with cryogenic components and fault-tolerant schemes. Hence the network elements deploy different encoding protocols. This article proposes quantum terminals that are compatible with different encoding protocols, paving the way for realizing mobile edge-quantum computing. By accommodating the atomic lattice processor inside a cavity, the entangling mechanism is provided by the Rydberg cavity-QED technology. The auxiliary atom, responsible for photon emission, senses the logical qubit state via the long-range Rydberg interaction. In other words, the state of the logical qubit determines the interaction-induced level shift at the central atom and hence drives the system over distinguished eigenstates, featuring photon emission at early or late times controlled by quantum interference. Applying an entanglement-swapping gate on two emitted photons would make the far-separated logical qubits entangled regardless of their encoding protocols.
The proposed scheme provides a universal photonic interface for clustering the processors and connecting them with the quantum memories and quantum cloud compatible with different encoding formats.}, } @article {pmid38965235, year = {2024}, author = {Edfeldt, K and Edwards, AM and Engkvist, O and Günther, J and Hartley, M and Hulcoop, DG and Leach, AR and Marsden, BD and Menge, A and Misquitta, L and Müller, S and Owen, DR and Schütt, KT and Skelton, N and Steffen, A and Tropsha, A and Vernet, E and Wang, Y and Wellnitz, J and Willson, TM and Clevert, DA and Haibe-Kains, B and Schiavone, LH and Schapira, M}, title = {A data science roadmap for open science organizations engaged in early-stage drug discovery.}, journal = {Nature communications}, volume = {15}, number = {1}, pages = {5640}, pmid = {38965235}, issn = {2041-1723}, support = {R01 GM140154/GM/NIGMS NIH HHS/United States ; T32 GM135122/GM/NIGMS NIH HHS/United States ; RGPIN-2019-04416//Canadian Network for Research and Innovation in Machining Technology, Natural Sciences and Engineering Research Council of Canada (NSERC Canadian Network for Research and Innovation in Machining Technology)/ ; }, mesh = {*Drug Discovery/methods ; *Machine Learning ; *Data Science/methods ; Humans ; Artificial Intelligence ; Information Dissemination/methods ; Data Mining/methods ; Cloud Computing ; Databases, Factual ; }, abstract = {The Structural Genomics Consortium is an international open science research organization with a focus on accelerating early-stage drug discovery, namely hit discovery and optimization. We, as many others, believe that artificial intelligence (AI) is poised to be a main accelerator in the field. The question is then how to best benefit from recent advances in AI and how to generate, format and disseminate data to enable future breakthroughs in AI-guided drug discovery. We present here the recommendations of a working group composed of experts from both the public and private sectors. Robust data management requires precise ontologies and standardized vocabulary while a centralized database architecture across laboratories facilitates data integration into high-value datasets. Lab automation and opening electronic lab notebooks to data mining push the boundaries of data sharing and data modeling. Important considerations for building robust machine-learning models include transparent and reproducible data processing, choosing the most relevant data representation, defining the right training and test sets, and estimating prediction uncertainty. Beyond data-sharing, cloud-based computing can be harnessed to build and disseminate machine-learning models. 
Important vectors of acceleration for hit and chemical probe discovery will be (1) the real-time integration of experimental data generation and modeling workflows within design-make-test-analyze (DMTA) cycles, openly and at scale, and (2) the adoption of a mindset where data scientists and experimentalists work as a unified team, and where data science is incorporated into the experimental design.}, } @article {pmid38962905, year = {2024}, author = {Li, F and Lv, K and Liu, X and Zhou, Y and Liu, K}, title = {Accurately Computing the Interacted Volume of Molecules over Their 3D Mesh Models.}, journal = {Journal of chemical information and modeling}, volume = {64}, number = {14}, pages = {5535-5546}, doi = {10.1021/acs.jcim.4c00641}, pmid = {38962905}, issn = {1549-960X}, mesh = {*Models, Molecular ; Static Electricity ; Algorithms ; Molecular Conformation ; Catalysis ; }, abstract = {For quickly predicting the rational arrangement of catalysts and substrates, we previously proposed a method to calculate the interacted volumes of molecules over their 3D point cloud models. However, the nonuniform density in molecular point clouds may lead to incomplete contours in some slices, reducing the accuracy of the previous method. In this paper, we propose a two-step method for more accurately computing molecular interacted volumes. First, by employing a prematched mesh slicing method, we layer the 3D triangular mesh models of the electrostatic potential isosurfaces of two molecules globally, transforming the volume calculation into finding the intersecting areas in each layer. Next, by subdividing polygonal edges, we accurately identify intersecting parts within each layer, ensuring precise calculation of interacted volumes. In addition, we present a concise overview for computing intersecting areas in cases of multiple contour intersections and for improving computational efficiency by incorporating bounding boxes at three stages. Experimental results demonstrate that our method maintains high accuracy in different experimental data sets, with an average relative error of 0.16%. On the same experimental setup, our average relative error is 0.07%, which is lower than the previous algorithm's 1.73%, improving the accuracy and stability in calculating interacted volumes.}, } @article {pmid38941113, year = {2024}, author = {Seaman, RP and Campbell, R and Doe, V and Yosufzai, Z and Graber, JH}, title = {A cloud-based training module for efficient de novo transcriptome assembly using Nextflow and Google cloud.}, journal = {Briefings in bioinformatics}, volume = {25}, number = {4}, pages = {}, pmid = {38941113}, issn = {1477-4054}, support = {P20 GM103423/GM/NIGMS NIH HHS/United States ; //Administrative Supplement to the Maine INBRE/ ; P20GM103423//National Institute of General Medical Sciences of the National Institutes of Health/ ; //Institutional Development Award/ ; }, mesh = {*Cloud Computing ; *Transcriptome ; Computational Biology/methods/education ; Software ; Humans ; Gene Expression Profiling/methods ; Internet ; }, abstract = {This study describes the development of a resource module that is part of a learning platform named "NIGMS Sandbox for Cloud-based Learning" (https://github.com/NIGMS/NIGMS-Sandbox). The overall genesis of the Sandbox is described in the editorial NIGMS Sandbox at the beginning of this Supplement.
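A compact sketch of the layer-wise intersected-area idea from the molecular interacted-volume paper above, using trimesh sections and shapely polygon intersection. The shared 2D frame, fixed slab thickness, and omission of the paper's prematching and multi-contour bookkeeping are simplifications.

```python
# Sketch: intersected volume of two closed meshes as a sum of per-layer overlaps.
import numpy as np
import trimesh
from shapely.ops import unary_union

def slice_polygons(mesh, z, to_2d):
    section = mesh.section(plane_origin=[0, 0, z], plane_normal=[0, 0, 1])
    if section is None:
        return None
    planar, _ = section.to_planar(to_2D=to_2d)   # same 2D frame for both meshes
    return unary_union(planar.polygons_full)

def intersected_volume(mesh_a, mesh_b, dz=0.1):
    lo = max(mesh_a.bounds[0][2], mesh_b.bounds[0][2])
    hi = min(mesh_a.bounds[1][2], mesh_b.bounds[1][2])
    volume = 0.0
    for z in np.arange(lo + dz / 2, hi, dz):
        to_2d = trimesh.geometry.plane_transform([0, 0, z], [0, 0, 1])
        pa = slice_polygons(mesh_a, z, to_2d)
        pb = slice_polygons(mesh_b, z, to_2d)
        if pa and pb:
            volume += pa.intersection(pb).area * dz  # overlap area x slab height
    return volume
```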
This module delivers learning materials on de novo transcriptome assembly using Nextflow in an interactive format that uses appropriate cloud resources for data access and analysis. Cloud computing is a powerful new means by which biomedical researchers can access resources and capacity that were previously either unattainable or prohibitively expensive. To take advantage of these resources, however, the biomedical research community needs new skills and knowledge. We present here a cloud-based training module, developed in conjunction with Google Cloud, Deloitte Consulting, and the NIH STRIDES Program, that uses the biological problem of de novo transcriptome assembly to demonstrate and teach the concepts of computational workflows (using Nextflow) and cost- and resource-efficient use of Cloud services (using Google Cloud Platform). Our work highlights the reduced necessity of on-site computing resources and the accessibility of cloud-based infrastructure for bioinformatics applications.}, } @article {pmid38939612, year = {2023}, author = {Tanade, C and Rakestraw, E and Ladd, W and Draeger, E and Randles, A}, title = {Cloud Computing to Enable Wearable-Driven Longitudinal Hemodynamic Maps.}, journal = {International Conference for High Performance Computing, Networking, Storage and Analysis : [proceedings]. SC (Conference : Supercomputing)}, volume = {2023}, number = {}, pages = {}, pmid = {38939612}, issn = {2167-4337}, support = {DP1 AG082343/AG/NIA NIH HHS/United States ; }, abstract = {Tracking hemodynamic responses to treatment and stimuli over long periods remains a grand challenge. Moving from established single-heartbeat technology to longitudinal profiles would require continuous data describing how the patient's state evolves, new methods to extend the temporal domain over which flow is sampled, and high-throughput computing resources. While personalized digital twins can accurately measure 3D hemodynamics over several heartbeats, state-of-the-art methods would require hundreds of years of wallclock time on leadership scale systems to simulate one day of activity. To address these challenges, we propose a cloud-based, parallel-in-time framework leveraging continuous data from wearable devices to capture the first 3D patient-specific, longitudinal hemodynamic maps. We demonstrate the validity of our method by establishing ground truth data for 750 beats and comparing the results. Our cloud-based framework is based on an initial fixed set of simulations to enable the wearable-informed creation of personalized longitudinal hemodynamic maps.}, } @article {pmid38934441, year = {2024}, author = {Siruvoru, V and Aparna, S}, title = {Hybrid deep learning and optimized clustering mechanism for load balancing and fault tolerance in cloud computing.}, journal = {Network (Bristol, England)}, volume = {}, number = {}, pages = {1-22}, doi = {10.1080/0954898X.2024.2369137}, pmid = {38934441}, issn = {1361-6536}, abstract = {Cloud services are one of the most quickly developing technologies. Furthermore, load balancing is recognized as a fundamental challenge for achieving energy efficiency. The primary function of load balancing is to deliver optimal services by releasing the load over multiple resources. Fault tolerance is being used to improve the reliability and accessibility of the network. In this paper, a hybrid Deep Learning-based load balancing algorithm is developed. Initially, tasks are allocated to all VMs in a round-robin method. 
Furthermore, the Deep Embedding Cluster (DEC) utilizes the Central Processing Unit (CPU), bandwidth, memory, processing elements, and frequency scaling factors while determining if a VM is overloaded or underloaded. Tasks on an overloaded VM are evaluated and reassigned to an underloaded VM for cloud load balancing. In addition, the Deep Q Recurrent Neural Network (DQRNN) is proposed to balance the load based on numerous factors such as supply, demand, capacity, load, resource utilization, and fault tolerance. Furthermore, the effectiveness of this model is assessed by load, capacity, resource consumption, and success rate, with ideal values of 0.147, 0.726, 0.527, and 0.895, respectively, being achieved.}, } @article {pmid38931731, year = {2024}, author = {Francini, S and Marcelli, A and Chirici, G and Di Biase, RM and Fattorini, L and Corona, P}, title = {Per-Pixel Forest Attribute Mapping and Error Estimation: The Google Earth Engine and R dataDriven Tool.}, journal = {Sensors (Basel, Switzerland)}, volume = {24}, number = {12}, pages = {}, pmid = {38931731}, issn = {1424-8220}, abstract = {Remote sensing products are typically assessed using a single accuracy estimate for the entire map, despite significant variations in accuracy across different map areas or classes. Estimating per-pixel uncertainty is a major challenge for enhancing the usability and potential of remote sensing products. This paper introduces the dataDriven open access tool, a novel statistical design-based approach that specifically addresses this issue by estimating per-pixel uncertainty through a bootstrap resampling procedure. Leveraging Sentinel-2 remote sensing data as auxiliary information, the capabilities of the Google Earth Engine cloud computing platform, and the R programming language, dataDriven can be applied in any world region and for any variable of interest. In this study, the dataDriven tool was tested in the Rincine forest estate study area (eastern Tuscany, Italy), focusing on volume density as the variable of interest. The average volume density was 0.042, corresponding to 420 m[3] per hectare. The estimated pixel errors ranged between 93 m[3] and 979 m[3] per hectare and were 285 m[3] per hectare on average. The ability to produce error estimates for each pixel in the map is a novel aspect in the context of the current advances in remote sensing and forest monitoring and assessment. It constitutes a significant support in forest management applications and also a powerful communication tool, since it informs users about areas where map estimates are unreliable, at the same time highlighting the areas where the information provided via the map is more trustworthy. In light of this, the dataDriven tool aims to support researchers and practitioners in the spatially exhaustive use of remote sensing-derived products and map validation.}, } @article {pmid38931559, year = {2024}, author = {Hong, S and Kim, Y and Nam, J and Kim, S}, title = {On the Analysis of Inter-Relationship between Auto-Scaling Policy and QoS of FaaS Workloads.}, journal = {Sensors (Basel, Switzerland)}, volume = {24}, number = {12}, pages = {}, pmid = {38931559}, issn = {1424-8220}, support = {2021R1G1A1006326//National Research Foundation of Korea/ ; }, abstract = {A recent development in cloud computing has introduced serverless technology, enabling the convenient and flexible management of cloud-native applications.
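A minimal sketch of the overload/underload test that precedes task reassignment in the load-balancing study above. The per-VM features echo those the DEC stage is said to use; the thresholds, equal weighting, and VM structure are illustrative.

```python
# Sketch: flag overloaded and underloaded VMs ahead of task migration.
from dataclasses import dataclass

@dataclass
class VM:
    name: str
    cpu: float        # utilizations normalized to [0, 1]
    bandwidth: float
    memory: float

def classify(vms, high=0.8, low=0.3):
    load = {vm.name: (vm.cpu + vm.bandwidth + vm.memory) / 3 for vm in vms}
    over = [name for name, l in load.items() if l > high]
    under = [name for name, l in load.items() if l < low]
    return over, under   # tasks on 'over' VMs get reassigned to 'under' VMs

over, under = classify([VM("vm1", 0.9, 0.8, 0.95), VM("vm2", 0.1, 0.2, 0.15)])
```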
Typically, Function-as-a-Service (FaaS) solutions rely on serverless backend solutions, such as Kubernetes (K8s) and Knative, to leverage the advantages of resource management for the underlying containerized contexts, including auto-scaling and pod scheduling. To take advantage of these benefits, recent cloud service providers also deploy self-hosted serverless services on their own on-premise FaaS platforms rather than relying on commercial public cloud offerings. However, the lack of standardized guidelines on K8s abstractions for fair scheduling and resource allocation across auto-scaling configuration options in such on-premise hosting environments poses challenges in meeting the service level objectives (SLOs) of diverse workloads. This study fills this gap by exploring the relationship between auto-scaling behavior and the performance of FaaS workloads depending on scaling-related configurations in K8s. Based on comprehensive measurement studies, we derived guidance on which scaling configurations, such as the base metric and its threshold, should be applied to which workloads to maximize latency SLO attainment and the number of responses. Additionally, we propose a methodology to assess the scaling efficiency of the related K8s configurations regarding the quality of service (QoS) of FaaS workloads.}, } @article {pmid38918484, year = {2024}, author = {Hernández Olcina, J and Anquela Julián, AB and Martín Furones, ÁE}, title = {Navigating latency hurdles: an in-depth examination of a cloud-powered GNSS real-time positioning application on mobile devices.}, journal = {Scientific reports}, volume = {14}, number = {1}, pages = {14668}, pmid = {38918484}, issn = {2045-2322}, abstract = {A growing dependence on real-time positioning apps for navigation, safety, and location-based services necessitates a deep understanding of latency challenges within cloud-based Global Navigation Satellite System (GNSS) solutions. This study analyses a GNSS real-time positioning app on smartphones that utilizes cloud computing for positioning data delivery. The study investigates and quantifies diverse latency contributors throughout the system architecture, including GNSS signal acquisition, data transmission, cloud processing, and result dissemination. Controlled experiments and real-world scenarios are employed to assess the influence of network conditions, device capabilities, and cloud server load on overall positioning latency. Findings highlight system bottlenecks and their relative contributions to latency. Additionally, practical recommendations are presented for developers and cloud service providers to mitigate these challenges and guarantee an optimal user experience for real-time positioning applications.
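Looping back to the K8s auto-scaling study above, a small sketch of programmatically setting one of the scaling knobs it examines, a CPU-utilization threshold, via the official Kubernetes Python client (autoscaling/v1). The Deployment name, namespace, and replica bounds are illustrative.

```python
# Sketch: create a CPU-based HorizontalPodAutoscaler for a FaaS backend.
from kubernetes import client, config

config.load_kube_config()   # assumes a reachable cluster and local kubeconfig

hpa = client.V1HorizontalPodAutoscaler(
    metadata=client.V1ObjectMeta(name="faas-fn-hpa"),
    spec=client.V1HorizontalPodAutoscalerSpec(
        scale_target_ref=client.V1CrossVersionObjectReference(
            api_version="apps/v1", kind="Deployment", name="faas-fn"),
        min_replicas=1,
        max_replicas=10,
        target_cpu_utilization_percentage=50,  # the 'threshold' knob studied above
    ),
)
client.AutoscalingV1Api().create_namespaced_horizontal_pod_autoscaler(
    namespace="default", body=hpa)
```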
This study not only elucidates the complex interplay of factors affecting GNSS app latency, but also paves the way for future advancements in cloud-based positioning solutions, ensuring the accuracy and timeliness needed for safety-critical and emerging applications.}, } @article {pmid38916063, year = {2024}, author = {Ćosić, K and Popović, S and Wiederhold, BK}, title = {Enhancing Aviation Safety through AI-Driven Mental Health Management for Pilots and Air Traffic Controllers.}, journal = {Cyberpsychology, behavior and social networking}, volume = {27}, number = {8}, pages = {588-598}, doi = {10.1089/cyber.2023.0737}, pmid = {38916063}, issn = {2152-2723}, mesh = {Humans ; *Artificial Intelligence ; *Pilots ; *Aviation ; *Accidents, Aviation/prevention & control ; Mental Health ; Safety ; Mental Disorders/prevention & control/therapy ; }, abstract = {This article provides an overview of the mental health challenges faced by pilots and air traffic controllers (ATCs), whose stressful professional lives may negatively impact global flight safety and security. The adverse effects of mental health disorders on their flight performance pose a particular safety risk, especially in sudden unexpected startle situations. Therefore, the early detection, prediction and prevention of mental health deterioration in pilots and ATCs, particularly among those at high risk, are crucial to minimize potential air crash incidents caused by human factors. Recent research in artificial intelligence (AI) demonstrates the potential of machine and deep learning, edge and cloud computing, virtual reality and wearable multimodal physiological sensors for monitoring and predicting mental health disorders. Longitudinal monitoring and analysis of pilots' and ATCs' physiological, cognitive and behavioral states could help predict individuals at risk of undisclosed or emerging mental health disorders. Utilizing AI tools and methodologies to identify and select these individuals for preventive mental health training and interventions could be a promising and effective approach to preventing potential air crash accidents attributed to human factors and related mental health problems. Based on these insights, the article advocates for the design of a multidisciplinary mental healthcare ecosystem in modern aviation using AI tools and technologies, to foster more efficient and effective mental health management, thereby enhancing flight safety and security standards. This proposed ecosystem requires the collaboration of multidisciplinary experts, including psychologists, neuroscientists, physiologists, psychiatrists, etc., to address these challenges in modern aviation.}, } @article {pmid38915693, year = {2025}, author = {Czech, E and Millar, TR and Tyler, W and White, T and Elsworth, B and Guez, J and Hancox, J and Jeffery, B and Karczewski, KJ and Miles, A and Tallman, S and Unneberg, P and Wojdyla, R and Zabad, S and Hammerbacher, J and Kelleher, J}, title = {Analysis-ready VCF at Biobank scale using Zarr.}, journal = {bioRxiv : the preprint server for biology}, volume = {}, number = {}, pages = {}, pmid = {38915693}, issn = {2692-8205}, support = {/WT_/Wellcome Trust/United Kingdom ; R01 HG012473/HG/NHGRI NIH HHS/United States ; R56 HG011395/HG/NHGRI NIH HHS/United States ; }, abstract = {BACKGROUND: Variant Call Format (VCF) is the standard file format for interchanging genetic variation data and associated quality control metrics.
The usual row-wise encoding of the VCF data model (either as text or packed binary) emphasises efficient retrieval of all data for a given variant, but accessing data on a field or sample basis is inefficient. Biobank-scale datasets currently available consist of hundreds of thousands of whole genomes and hundreds of terabytes of compressed VCF. Row-wise data storage is fundamentally unsuitable, and a more scalable approach is needed.

RESULTS: Zarr is a format for storing multi-dimensional data that is widely used across the sciences, and is ideally suited to massively parallel processing. We present the VCF Zarr specification, an encoding of the VCF data model using Zarr, along with fundamental software infrastructure for efficient and reliable conversion at scale. We show how this format is far more efficient than standard VCF-based approaches, and competitive with specialised methods for storing genotype data in terms of compression ratios and single-threaded calculation performance. We present case studies on subsets of three large human datasets (Genomics England: n=78,195; Our Future Health: n=651,050; All of Us: n=245,394) along with whole genome datasets for Norway Spruce (n=1,063) and SARS-CoV-2 (n=4,484,157). We demonstrate the potential for VCF Zarr to enable a new generation of high-performance and cost-effective applications via illustrative examples using cloud computing and GPUs.
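A minimal sketch of the column-oriented storage idea behind VCF Zarr, assuming the zarr-python 2.x API; the array name, shapes, and chunking below are illustrative rather than the specification's actual schema.

```python
# Sketch: a genotype field stored as a chunked Zarr array for field/sample access.
import numpy as np
import zarr

n_variants, n_samples = 1_000_000, 10_000
root = zarr.open("calls.zarr", mode="w")
gt = root.create_dataset(
    "call_genotype",
    shape=(n_variants, n_samples, 2),   # diploid calls
    chunks=(10_000, 1_000, 2),          # chunked along variants and samples
    dtype="i1",
    fill_value=-1,                      # -1 encodes a missing allele
)
gt[0, :100] = np.zeros((100, 2), dtype="i1")   # write calls for one variant
column = gt[:, 42]   # one sample across all variants: touches one chunk column
```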

CONCLUSIONS: Large row-encoded VCF files are a major bottleneck for current research, and storing and processing these files incurs a substantial cost. The VCF Zarr specification, building on widely-used, open-source technologies, has the potential to greatly reduce these costs, and may enable a diverse ecosystem of next-generation tools for analysing genetic variation data directly from cloud-based object stores, while maintaining compatibility with existing file-oriented workflows.}, } @article {pmid38912450, year = {2024}, author = {Yang, Y and Ren, K and Song, J}, title = {Enhancing Earth data analysis in 5G satellite networks: A novel lightweight approach integrating improved deep learning.}, journal = {Heliyon}, volume = {10}, number = {11}, pages = {e32071}, pmid = {38912450}, issn = {2405-8440}, abstract = {Efficiently handling huge amounts of data and enabling processing-intensive applications to run simultaneously in faraway areas is the ultimate objective of 5G networks. Currently, in order to distribute computing tasks, ongoing studies are exploring the incorporation of fog-cloud servers onto satellites, presenting a promising solution to enhance connectivity in remote areas. Nevertheless, analyzing the copious amounts of data produced by scattered sensors remains a challenging endeavor. The conventional strategy of transmitting this data to a central server for analysis can be costly. In contrast to centralized learning methods, distributed machine learning (ML) provides an alternative approach, albeit with notable drawbacks. This paper addresses the comparative learning expenses of centralized and distributed learning systems to tackle these challenges directly. It proposes the creation of an integrated system that harmoniously merges cloud servers with satellite network structures, leveraging the strengths of each system. This integration could represent a major breakthrough in satellite-based networking technology by streamlining data processing from remote nodes and cutting down on expenses. The core of this approach lies in the adaptive tailoring of learning techniques for individual entities based on their specific contextual nuances. The experimental findings underscore the prowess of the innovative lightweight strategy, LMAED[2]L (Enhanced Deep Learning for Earth Data Analysis), across a spectrum of machine learning assignments, showcasing remarkable and consistent performance under diverse operational conditions. Through a strategic fusion of centralized and distributed learning frameworks, the LMAED[2]L method emerges as a dynamic and effective remedy for the intricate data analysis challenges encountered within satellite networks interfaced with cloud servers. The empirical findings reveal a significant performance boost of our novel approach over traditional methods, with an average increase in reward (4.1 %), task completion rate (3.9 %), and delivered packets (3.4 %).
This report suggests that these advancements will catalyze the integration of cutting-edge machine learning algorithms within future networks, elevating responsiveness, efficiency, and resource utilization to new heights.}, } @article {pmid38909109, year = {2024}, author = {Qu, L and Xie, HQ and Pei, JL and Li, YG and Wu, JM and Feng, G and Xiao, ML}, title = {Cloud inversion analysis of surrounding rock parameters for underground powerhouse based on PSO-BP optimized neural network and web technology.}, journal = {Scientific reports}, volume = {14}, number = {1}, pages = {14399}, pmid = {38909109}, issn = {2045-2322}, support = {No. 52109135//National Natural Science Foundation of China/ ; No. 2022-03//Science and Technology Innovation Program from Water Resources of Guangdong Province/ ; }, abstract = {Aiming at the shortcomings of the BP neural network in practical applications, such as its tendency to fall into local extrema and its slow convergence speed, we optimized the initial weights and thresholds of the BP neural network using particle swarm optimization (PSO). Additionally, cloud computing services, web technology, a cloud database and numerical simulation were integrated to construct an intelligent feedback analysis cloud program for underground engineering safety monitoring based on the PSO-BP algorithm. The program can conveniently, quickly, and intelligently carry out numerical analysis of underground engineering and dynamic feedback analysis of surrounding rock parameters. The program was applied to the cloud inversion analysis of the surrounding rock parameters for the underground powerhouse of the Shuangjiangkou Hydropower Station. The displacement simulated with the back-analyzed parameters matches the measured displacement very well. The posterior variance evaluation shows that the posterior error ratio is 0.045 and the small error probability is 0.999. The evaluation results indicate that the intelligent feedback analysis cloud program has high accuracy and can be applied to engineering practice.}, } @article {pmid38904211, year = {2024}, author = {Navaneethakrishnan, M and Robinson Joel, M and Kalavai Palani, S and Gnanaprakasam, GJ}, title = {EfficientNet-deep quantum neural network-based economic denial of sustainability attack detection to enhance network security in cloud.}, journal = {Network (Bristol, England)}, volume = {}, number = {}, pages = {1-25}, doi = {10.1080/0954898X.2024.2361093}, pmid = {38904211}, issn = {1361-6536}, abstract = {Cloud computing (CC) is a future revolution in the Information Technology (IT) and Communication field. Security and internet connectivity are the major factors slowing the proliferation of CC. Recently, a new kind of denial of service (DDoS) attack, known as the Economic Denial of Sustainability (EDoS) attack, has been emerging. Though EDoS attacks are small at the moment, they can be expected to grow in the near future in tandem with the progression of cloud usage. Here, an EfficientNet-B3-Attn-2 fused Deep Quantum Neural Network (EfficientNet-DQNN) is presented for EDoS detection. Initially, the cloud is simulated, and thereafter the considered input log file is fed in for data pre-processing. Z-Score Normalization (ZSN) is employed to carry out the pre-processing of data. Afterwards, feature fusion (FF) is accomplished based on a Deep Neural Network (DNN) with Kulczynski similarity. Then, data augmentation (DA) is executed by oversampling based upon the Synthetic Minority Over-sampling Technique (SMOTE).
Finally, attack detection is conducted utilizing EfficientNet-DQNN. Furthermore, EfficientNet-DQNN is formed by the incorporation of EfficientNet-B3-Attn-2 with DQNN. In addition, EfficientNet-DQNN attained an F1-score of 89.8%, accuracy of 90.4%, precision of 91.1%, and recall of 91.2% on the BoT-IoT dataset with K-fold cross-validation at K = 9.}, } @article {pmid38902499, year = {2024}, author = {Dai, S}, title = {On the quantum circuit implementation of modus ponens.}, journal = {Scientific reports}, volume = {14}, number = {1}, pages = {14245}, pmid = {38902499}, issn = {2045-2322}, support = {62006168//National Natural Science Foundation of China/ ; LQ21A010001//Natural Science Foundation of Zhejiang Province/ ; }, abstract = {The process of inference reflects the structure of propositions with assigned truth values, either true or false. Modus ponens is a fundamental form of inference that involves affirming the antecedent to affirm the consequent. Inspired by quantum computing, the superposition of true and false is used for parallel processing. In this work, we propose a quantum version of modus ponens. Additionally, we introduce two generalizations of quantum modus ponens: the quantum modus ponens inference chain and multidimensional quantum modus ponens. Finally, a simple implementation of quantum modus ponens on the OriginQ quantum computing cloud platform is demonstrated.}, } @article {pmid38894434, year = {2024}, author = {Gazis, A and Katsiri, E}, title = {Streamline Intelligent Crowd Monitoring with IoT Cloud Computing Middleware.}, journal = {Sensors (Basel, Switzerland)}, volume = {24}, number = {11}, pages = {}, pmid = {38894434}, issn = {1424-8220}, abstract = {This article introduces a novel middleware that utilizes cost-effective, low-power computing devices like Raspberry Pi to analyze data from wireless sensor networks (WSNs). It is designed for indoor settings like historical buildings and museums, tracking visitors and identifying points of interest. It serves as an evacuation aid by monitoring occupancy and gauging the popularity of specific areas, subjects, or art exhibitions. The middleware employs a basic form of the MapReduce algorithm to gather WSN data and distribute it across available computer nodes. Data collected by RFID sensors on visitor badges is stored on mini-computers placed in exhibition rooms and then transmitted to a remote database after a preset time frame. Utilizing MapReduce for data analysis and a leader election algorithm for fault tolerance, this middleware showcases its viability through metrics, demonstrating applications like swift prototyping and accurate validation of findings. Despite using simpler hardware, its performance matches that of resource-intensive methods involving audiovisual and AI techniques. This design's innovation lies in its fault-tolerant, distributed setup using budget-friendly, low-power devices rather than resource-heavy hardware or methods. Successfully tested at a historical building in Greece (M. Hatzidakis' residence), it is tailored for indoor spaces. This paper compares its algorithmic application layer with other implementations, highlighting its technical strengths and advantages.
Particularly relevant in the wake of the COVID-19 pandemic, and as a general monitoring middleware for indoor locations, this middleware holds promise for tracking visitor counts and overall building occupancy.}, } @article {pmid38894431, year = {2024}, author = {López-Ortiz, EJ and Perea-Trigo, M and Soria-Morillo, LM and Álvarez-García, JA and Vegas-Olmos, JJ}, title = {Energy-Efficient Edge and Cloud Image Classification with Multi-Reservoir Echo State Network and Data Processing Units.}, journal = {Sensors (Basel, Switzerland)}, volume = {24}, number = {11}, pages = {}, pmid = {38894431}, issn = {1424-8220}, abstract = {In an era dominated by Internet of Things (IoT) devices, software-as-a-service (SaaS) platforms, and rapid advances in cloud and edge computing, the demand for efficient and lightweight models suitable for resource-constrained devices such as data processing units (DPUs) has surged. Traditional deep learning models, such as convolutional neural networks (CNNs), pose significant computational and memory challenges, limiting their use in resource-constrained environments. Echo State Networks (ESNs), based on reservoir computing principles, offer a promising alternative with reduced computational complexity and shorter training times. This study explores the applicability of ESN-based architectures in image classification and weather forecasting tasks, using benchmarks such as the MNIST, Fashion-MNIST, and CloudCast datasets. Through comprehensive evaluations, the Multi-Reservoir ESN (MRESN) architecture emerges as a standout performer, demonstrating its potential for deployment on DPUs or home stations. By exploiting the dynamic adaptability of MRESN to changing input signals, such as weather forecasts, continuous on-device training becomes feasible, eliminating the need for static pre-trained models. Our results highlight the importance of lightweight models such as MRESN in cloud and edge computing applications where efficiency and sustainability are paramount. This study contributes to the advancement of efficient computing practices by providing novel insights into the performance and versatility of MRESN architectures. By facilitating the adoption of lightweight models in resource-constrained environments, our research provides a viable alternative for improved efficiency and scalability in modern computing paradigms.}, } @article {pmid38876087, year = {2024}, author = {Bayerlein, R and Swarnakar, V and Selfridge, A and Spencer, BA and Nardo, L and Badawi, RD}, title = {Cloud-based serverless computing enables accelerated monte carlo simulations for nuclear medicine imaging.}, journal = {Biomedical physics & engineering express}, volume = {10}, number = {4}, pages = {}, pmid = {38876087}, issn = {2057-1976}, support = {R01 CA206187/CA/NCI NIH HHS/United States ; R01 CA249422/CA/NCI NIH HHS/United States ; }, mesh = {*Monte Carlo Method ; *Cloud Computing ; *Nuclear Medicine/methods ; *Computer Simulation ; *Software ; Humans ; Image Processing, Computer-Assisted/methods ; Positron-Emission Tomography/methods ; Internet ; Algorithms ; }, abstract = {Objective. This study investigates the potential of cloud-based serverless computing to accelerate Monte Carlo (MC) simulations for nuclear medicine imaging tasks. MC simulations can pose a high computational burden, even when executed on modern multi-core computing servers.
Cloud computing allows simulation tasks to be highly parallelized and considerably accelerated. Approach. We investigate the computational performance of a cloud-based serverless MC simulation of radioactive decays for positron emission tomography imaging using the Amazon Web Services (AWS) Lambda serverless computing platform for the first time in the scientific literature. We provide a comparison of the computational performance of AWS to a modern on-premises multi-thread reconstruction server by measuring the execution times of the processes, using between 10^5 and 2·10^10 simulated decays. We deployed two popular MC simulation frameworks, SimSET and GATE, within the AWS computing environment. Containerized application images were used as a basis for an AWS Lambda function, and local (non-cloud) scripts were used to orchestrate the deployment of simulations. The task was broken down into smaller parallel runs launched on concurrently running AWS Lambda instances, and the results were postprocessed and downloaded via the Simple Storage Service. Main results. Our implementation of cloud-based MC simulations with SimSET outperforms local server-based computations by more than an order of magnitude. However, the GATE implementation creates more and larger output files and reveals that the internet connection speed can become the primary bottleneck for data transfers. Simulating 10^9 decays using SimSET is possible within 5 min and accrues computation costs of about $10 on AWS, whereas GATE would have to run in batches for more than 100 min at considerably higher costs. Significance. Adopting a cloud-based serverless computing architecture in medical imaging research facilities can considerably improve processing times and overall workflow efficiency, with future research exploring additional enhancements through optimized configurations and computational methods.}, } @article {pmid38875671, year = {2024}, author = {Guo, Y and Ganti, S and Wu, Y}, title = {Enhancing Energy Efficiency in Telehealth Internet of Things Systems Through Fog and Cloud Computing Integration: Simulation Study.}, journal = {JMIR biomedical engineering}, volume = {9}, number = {}, pages = {e50175}, pmid = {38875671}, issn = {2561-3278}, abstract = {BACKGROUND: The increasing adoption of telehealth Internet of Things (IoT) devices in health care informatics has led to concerns about energy use and data processing efficiency.
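The fan-out pattern described in the serverless Monte Carlo entry above (pmid38876087), in which a large decay count is split into many small runs that each execute on a concurrently running AWS Lambda instance, looks roughly like the following boto3 sketch; the function name, bucket, and payload schema are hypothetical placeholders, not the authors' deployment.

```python
# Sketch of the serverless fan-out pattern: split a large decay count
# into small parallel runs and invoke one AWS Lambda per run.
# Function name, bucket, and payload schema are hypothetical.
import json
import boto3

lambda_client = boto3.client("lambda")

TOTAL_DECAYS = 10**9
N_WORKERS = 1000
decays_per_run = TOTAL_DECAYS // N_WORKERS

for run_id in range(N_WORKERS):
    payload = {
        "run_id": run_id,
        "n_decays": decays_per_run,
        "output_bucket": "my-simulation-results",  # hypothetical bucket
    }
    # 'Event' = asynchronous invocation, so all runs execute concurrently.
    lambda_client.invoke(
        FunctionName="simset-mc-worker",  # hypothetical function name
        InvocationType="Event",
        Payload=json.dumps(payload),
    )
# Outputs would then be collected from S3 (the Simple Storage Service
# mentioned in the abstract) and postprocessed locally.
```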

OBJECTIVE: This paper introduces an innovative model that integrates telehealth IoT devices with a fog and cloud computing-based platform, aiming to enhance energy efficiency in telehealth IoT systems.

METHODS: The proposed model incorporates adaptive energy-saving strategies, localized fog nodes, and a hybrid cloud infrastructure. Simulation analyses were conducted to assess the model's effectiveness in reducing energy consumption and enhancing data processing efficiency.
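As a toy illustration of the kind of simulation analysis these methods describe, the following sketch compares cloud-only processing with a fog/cloud split; every energy constant is an invented placeholder, not a value from the paper.

```python
# Toy simulation comparing cloud-only processing with a fog/cloud split,
# in the spirit of the model above. All energy constants (joules per
# task) are invented placeholders.
E_TRANSMIT_TO_CLOUD = 5.0   # J per task sent to the remote cloud
E_CLOUD_PROCESS = 2.0       # J per task processed in the cloud
E_TRANSMIT_TO_FOG = 0.5     # J per task sent to a nearby fog node
E_FOG_PROCESS = 3.0         # J per task processed on the fog node

def total_energy(n_tasks: int, fog_fraction: float) -> float:
    """Total energy when `fog_fraction` of tasks stay on fog nodes."""
    n_fog = int(n_tasks * fog_fraction)
    n_cloud = n_tasks - n_fog
    return (n_fog * (E_TRANSMIT_TO_FOG + E_FOG_PROCESS)
            + n_cloud * (E_TRANSMIT_TO_CLOUD + E_CLOUD_PROCESS))

baseline = total_energy(10_000, fog_fraction=0.0)
hybrid = total_energy(10_000, fog_fraction=0.6)
print(f"cloud-only: {baseline:.0f} J, hybrid: {hybrid:.0f} J, "
      f"saving: {100 * (1 - hybrid / baseline):.1f}%")
```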

RESULTS: Simulation results demonstrated significant energy savings, with a 2% reduction in energy consumption achieved through adaptive energy-saving strategies. Simulation sample sizes ranged from 10 to 40, providing statistical robustness to the findings.

CONCLUSIONS: The proposed model successfully addresses energy and data processing challenges in telehealth IoT scenarios. By integrating fog computing for local processing and a hybrid cloud infrastructure, substantial energy savings are achieved. Ongoing research will focus on refining the energy conservation model and exploring additional functional enhancements for broader applicability in health care and industrial contexts.}, } @article {pmid38875568, year = {2023}, author = {Chan, NB and Li, W and Aung, T and Bazuaye, E and Montero, RM}, title = {Machine Learning-Based Time in Patterns for Blood Glucose Fluctuation Pattern Recognition in Type 1 Diabetes Management: Development and Validation Study.}, journal = {JMIR AI}, volume = {2}, number = {}, pages = {e45450}, pmid = {38875568}, issn = {2817-1705}, abstract = {BACKGROUND: Continuous glucose monitoring (CGM) for diabetes combines noninvasive glucose biosensors, continuous monitoring, cloud computing, and analytics to connect and simulate a hospital setting in a person's home. CGM systems inspired analytics methods to measure glycemic variability (GV), but existing GV analytics methods disregard glucose trends and patterns; hence, they fail to capture entire temporal patterns and do not provide granular insights about glucose fluctuations.

OBJECTIVE: This study aimed to propose a machine learning-based framework for blood glucose fluctuation pattern recognition, which enables a more comprehensive representation of GV profiles that could present detailed fluctuation information, be easily understood by clinicians, and provide insights about patient groups based on time in blood fluctuation patterns.

METHODS: Overall, 1.5 million measurements from 126 patients in the United Kingdom with type 1 diabetes mellitus (T1DM) were collected, and prevalent blood fluctuation patterns were extracted using dynamic time warping. The patterns were further validated in 225 patients in the United States with T1DM. Hierarchical clustering was then applied on time in patterns to form 4 clusters of patients. Patient groups were compared using statistical analysis.
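A minimal sketch of the two computational steps named here, dynamic time warping between short glucose segments and hierarchical clustering of patients by their time-in-patterns vectors, using toy data rather than the study's CGM measurements:

```python
# DTW distance between glucose segments, plus hierarchical clustering
# of patients by "time in patterns" vectors. Toy data; not the authors'
# code.
import numpy as np
from scipy.cluster.hierarchy import linkage, fcluster

def dtw(a: np.ndarray, b: np.ndarray) -> float:
    """Classic O(len(a)*len(b)) dynamic time warping distance."""
    n, m = len(a), len(b)
    D = np.full((n + 1, m + 1), np.inf)
    D[0, 0] = 0.0
    for i in range(1, n + 1):
        for j in range(1, m + 1):
            cost = abs(a[i - 1] - b[j - 1])
            D[i, j] = cost + min(D[i - 1, j], D[i, j - 1], D[i - 1, j - 1])
    return float(D[n, m])

rng = np.random.default_rng(1)
seg_rising = np.cumsum(rng.uniform(0, 1, 12))  # toy rising segment
seg_stable = rng.normal(0, 0.2, 12)            # toy stable segment
print("DTW(rising, stable) =", round(dtw(seg_rising, seg_stable), 2))

# Each row: one patient's fraction of time spent in 6 fluctuation patterns.
time_in_patterns = rng.dirichlet(np.ones(6), size=20)
Z = linkage(time_in_patterns, method="ward")
labels = fcluster(Z, t=4, criterion="maxclust")  # 4 patient clusters
print("cluster sizes:", np.bincount(labels)[1:])
```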

RESULTS: In total, 6 patterns depicting distinctive glucose levels and trends were identified and validated, based on which 4 GV profiles of patients with T1DM were found. They were significantly different in terms of glycemic statuses such as diabetes duration (P=.04), glycated hemoglobin level (P<.001), and time in range (P<.001) and thus had different management needs.

CONCLUSIONS: The proposed method can analytically extract existing blood fluctuation patterns from CGM data. Thus, time in patterns can capture a rich view of patients' GV profiles. Its conceptual resemblance to time in range, along with rich blood fluctuation details, makes it more scalable, accessible, and informative to clinicians.}, } @article {pmid38870489, year = {2024}, author = {Danning, Z and Jia, Q and Yinni, M and Linjia, L}, title = {Establishment and Verification of a Skin Cancer Diagnosis Model Based on Image Convolutional Neural Network Analysis and Artificial Intelligence Algorithms.}, journal = {Alternative therapies in health and medicine}, volume = {}, number = {}, pages = {}, pmid = {38870489}, issn = {1078-6791}, abstract = {Skin cancer is a serious public health problem, with countless deaths due to skin cancer each year. Early detection and aggressive, effective treatment of the primary focus offer the best outcomes for skin cancer and are important for improving patients' prognosis and reducing the death rate of the disease. However, judging skin tumors by the naked eye alone is highly subjective, and the diagnosis can vary greatly even among professionally trained physicians. Clinically, skin endoscopy is a commonly used method for early diagnosis. However, manual examination is time-consuming, laborious, and highly dependent on the clinical experience of dermatologists. In today's society, with the rapid development of information technology, the amount of information is increasing at a geometric rate, and new technologies such as cloud computing, distributed computing, data mining, and metaheuristics are emerging. In this paper, we design and build a computer-aided diagnosis system for dermatoscopic images and apply metaheuristic algorithms to image enhancement and image segmentation to improve the quality of images, thus increasing the speed of diagnosis and enabling earlier detection and treatment.}, } @article {pmid38869158, year = {2024}, author = {Hu, Y and Schnaubelt, M and Chen, L and Zhang, B and Hoang, T and Lih, TM and Zhang, Z and Zhang, H}, title = {MS-PyCloud: A Cloud Computing-Based Pipeline for Proteomic and Glycoproteomic Data Analyses.}, journal = {Analytical chemistry}, volume = {96}, number = {25}, pages = {10145-10151}, pmid = {38869158}, issn = {1520-6882}, support = {U01 CA152813/CA/NCI NIH HHS/United States ; U24 CA210985/CA/NCI NIH HHS/United States ; U24 CA271079/CA/NCI NIH HHS/United States ; U2C CA271895/CA/NCI NIH HHS/United States ; }, mesh = {*Proteomics/methods ; Software ; Tandem Mass Spectrometry/methods ; Cloud Computing ; Glycoproteins/analysis ; Humans ; }, abstract = {Rapid development and wide adoption of mass spectrometry-based glycoproteomic technologies have empowered scientists to study proteins and protein glycosylation in complex samples on a large scale. This progress has also created unprecedented challenges for individual laboratories to store, manage, and analyze proteomic and glycoproteomic data, both in the cost of proprietary software and high-performance computing and in the long processing time that discourages on-the-fly changes of data processing settings required in explorative and discovery analysis. We developed an open-source, cloud computing-based pipeline, MS-PyCloud, with a graphical user interface (GUI), for proteomic and glycoproteomic data analysis.
The major components of this pipeline include data file integrity validation, MS/MS database search for spectral assignments to peptide sequences, false discovery rate estimation, protein inference, quantitation of global protein levels, and specific glycan-modified glycopeptides as well as other modification-specific peptides such as phosphorylation, acetylation, and ubiquitination. To ensure the transparency and reproducibility of data analysis, MS-PyCloud includes open-source software tools with comprehensive testing and versioning for spectrum assignments. Leveraging public cloud computing infrastructure via Amazon Web Services (AWS), MS-PyCloud scales seamlessly based on analysis demand to achieve fast and efficient performance. Application of the pipeline to the analysis of large-scale LC-MS/MS data sets demonstrated the effectiveness and high performance of MS-PyCloud. The software can be downloaded at https://github.com/huizhanglab-jhu/ms-pycloud.}, } @article {pmid38868668, year = {2024}, author = {Sochat, V and Culquicondor, A and Ojea, A and Milroy, D}, title = {The Flux Operator.}, journal = {F1000Research}, volume = {13}, number = {}, pages = {203}, pmid = {38868668}, issn = {2046-1402}, mesh = {*Cloud Computing ; Workload ; Workflow ; }, abstract = {Converged computing is an emerging area of computing that brings together the best of both worlds for high performance computing (HPC) and cloud-native communities. The economic influence of cloud computing and the need for workflow portability, flexibility, and manageability are driving this emergence. Navigating the uncharted territory and building an effective space for both HPC and cloud require collaborative technological development and research. In this work, we focus on developing components for the converged workload manager, the central component of batch workflows running in any environment. From the cloud we base our work on Kubernetes, the de facto standard batch workload orchestrator. From HPC the orchestrator counterpart is Flux Framework, a fully hierarchical resource management and graph-based scheduler with a modular architecture that supports sophisticated scheduling and job management. Bringing these managers together consists of implementing Flux inside of Kubernetes, enabling hierarchical resource management and scheduling that scales without burdening the Kubernetes scheduler. This paper introduces the Flux Operator - an on-demand HPC workload manager deployed in Kubernetes. Our work describes design decisions, mapping components between environments, and experimental features. We perform experiments that compare application performance when deployed by the Flux Operator and the MPI Operator and present the results. 
Finally, we review remaining challenges and describe our vision of the future for improved technological innovation and collaboration through converged computing.}, } @article {pmid38862616, year = {2025}, author = {Salcedo, A and Tarabichi, M and Buchanan, A and Espiritu, SMG and Zhang, H and Zhu, K and Ou Yang, TH and Leshchiner, I and Anastassiou, D and Guan, Y and Jang, GH and Mootor, MFE and Haase, K and Deshwar, AG and Zou, W and Umar, I and Dentro, S and Wintersinger, JA and Chiotti, K and Demeulemeester, J and Jolly, C and Sycza, L and Ko, M and , and , and Wedge, DC and Morris, QD and Ellrott, K and Van Loo, P and Boutros, PC}, title = {Crowd-sourced benchmarking of single-sample tumor subclonal reconstruction.}, journal = {Nature biotechnology}, volume = {43}, number = {4}, pages = {581-592}, pmid = {38862616}, issn = {1546-1696}, support = {U2C CA271894/CA/NCI NIH HHS/United States ; U24 CA248265/CA/NCI NIH HHS/United States ; P30 CA016042/CA/NCI NIH HHS/United States ; R01 CA244729/CA/NCI NIH HHS/United States ; /WT_/Wellcome Trust/United Kingdom ; U01 CA214194/CA/NCI NIH HHS/United States ; P30 CA008748/CA/NCI NIH HHS/United States ; MR/L016311/1/MRC_/Medical Research Council/United Kingdom ; CC2008/ARC_/Arthritis Research UK/United Kingdom ; U54 HG012517/HG/NHGRI NIH HHS/United States ; }, mesh = {Humans ; *Neoplasms/genetics ; *Benchmarking ; Algorithms ; Mutation ; Sequence Analysis, DNA/methods ; Cloud Computing ; }, abstract = {Subclonal reconstruction algorithms use bulk DNA sequencing data to quantify parameters of tumor evolution, allowing an assessment of how cancers initiate, progress and respond to selective pressures. We launched the ICGC-TCGA (International Cancer Genome Consortium-The Cancer Genome Atlas) DREAM Somatic Mutation Calling Tumor Heterogeneity and Evolution Challenge to benchmark existing subclonal reconstruction algorithms. This 7-year community effort used cloud computing to benchmark 31 subclonal reconstruction algorithms on 51 simulated tumors. Algorithms were scored on seven independent tasks, leading to 12,061 total runs. Algorithm choice influenced performance substantially more than tumor features but purity-adjusted read depth, copy-number state and read mappability were associated with the performance of most algorithms on most tasks. No single algorithm was a top performer for all seven tasks and existing ensemble strategies were unable to outperform the best individual methods, highlighting a key research need. 
All containerized methods, evaluation code and datasets are available to support further assessment of the determinants of subclonal reconstruction accuracy and development of improved methods to understand tumor evolution.}, } @article {pmid38862433, year = {2024}, author = {Ko, G and Lee, JH and Sim, YM and Song, W and Yoon, BH and Byeon, I and Lee, BH and Kim, SO and Choi, J and Jang, I and Kim, H and Yang, JO and Jang, K and Kim, S and Kim, JH and Jeon, J and Jung, J and Hwang, S and Park, JH and Kim, PG and Kim, SY and Lee, B}, title = {KoNA: Korean Nucleotide Archive as A New Data Repository for Nucleotide Sequence Data.}, journal = {Genomics, proteomics & bioinformatics}, volume = {22}, number = {1}, pages = {}, pmid = {38862433}, issn = {2210-3244}, mesh = {Republic of Korea ; *Databases, Nucleic Acid ; Humans ; High-Throughput Nucleotide Sequencing/methods ; }, abstract = {During the last decade, the generation and accumulation of petabase-scale high-throughput sequencing data have resulted in great challenges, including access to human data, as well as transfer, storage, and sharing of enormous amounts of data. To promote data-driven biological research, the Korean government announced that all biological data generated from government-funded research projects should be deposited at the Korea BioData Station (K-BDS), which consists of multiple databases for individual data types. Here, we introduce the Korean Nucleotide Archive (KoNA), a repository of nucleotide sequence data. As of July 2022, the Korean Read Archive in KoNA has collected over 477 TB of raw next-generation sequencing data from national genome projects. To ensure data quality and prepare for international alignment, a standard operating procedure was adopted, which is similar to that of the International Nucleotide Sequence Database Collaboration. The standard operating procedure includes quality control processes for submitted data and metadata using an automated pipeline, followed by manual examination. To ensure fast and stable data transfer, a high-speed transmission system called GBox is used in KoNA. Furthermore, the data uploaded to or downloaded from KoNA through GBox can be readily processed using a cloud computing service called Bio-Express. This seamless coupling of KoNA, GBox, and Bio-Express enhances the data experience, including submission, access, and analysis of raw nucleotide sequences. KoNA not only satisfies the unmet needs for a national sequence repository in Korea but also provides datasets to researchers globally and contributes to advances in genomics. 
The KoNA is available at https://www.kobic.re.kr/kona/.}, } @article {pmid38860521, year = {2024}, author = {McMurry, AJ and Gottlieb, DI and Miller, TA and Jones, JR and Atreja, A and Crago, J and Desai, PM and Dixon, BE and Garber, M and Ignatov, V and Kirchner, LA and Payne, PRO and Saldanha, AJ and Shankar, PRV and Solad, YV and Sprouse, EA and Terry, M and Wilcox, AB and Mandl, KD}, title = {Cumulus: a federated electronic health record-based learning system powered by Fast Healthcare Interoperability Resources and artificial intelligence.}, journal = {Journal of the American Medical Informatics Association : JAMIA}, volume = {31}, number = {8}, pages = {1638-1647}, pmid = {38860521}, issn = {1527-974X}, support = {U18DP006500/CC/CDC HHS/United States ; NU38OT000286/CC/CDC HHS/United States ; NU58IP000004//Centers for Disease Control and Prevention Cooperative/ ; 90AX0031/01-00/OC/ONCHIT HHS/United States ; //United States Department of Health and Human Services/ ; /CC/CDC HHS/United States ; 90AX0031/01-00//National Coordinator for Health Information Technology/ ; NU38OT000286/OT/OSTLTS CDC HHS/United States ; U01 TR002997/TR/NCATS NIH HHS/United States ; //National Association of Chronic Disease Directors/ ; U01TR002623//National Institutes of Health Cooperative/ ; U18DP006500/ACL/ACL HHS/United States ; NU58IP000004/CC/CDC HHS/United States ; U01TR002623/NH/NIH HHS/United States ; }, mesh = {*Electronic Health Records ; *Artificial Intelligence ; Humans ; Software ; Cloud Computing ; Health Information Interoperability ; Information Dissemination ; }, abstract = {OBJECTIVE: To address challenges in large-scale electronic health record (EHR) data exchange, we sought to develop, deploy, and test an open source, cloud-hosted app "listener" that accesses standardized data across the SMART/HL7 Bulk FHIR Access application programming interface (API).

METHODS: We advance a model for scalable, federated data sharing and learning. Cumulus software is designed to address key technology and policy desiderata, including local utility, control, and administrative simplicity, as well as privacy preservation during robust data sharing and artificial intelligence (AI) for processing unstructured text.
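For context, the SMART/HL7 Bulk FHIR Access flow that a listener like Cumulus builds on follows a kick-off, poll, and download pattern; a minimal sketch with the requests library, in which the server URL, group ID, and token are hypothetical:

```python
# Minimal sketch of the Bulk FHIR Access "kick-off" step. The base URL
# and token are hypothetical; real deployments use SMART backend-services
# authorization.
import time
import requests

BASE = "https://ehr.example.org/fhir"      # hypothetical FHIR server
HEADERS = {
    "Authorization": "Bearer <token>",     # obtained out of band
    "Accept": "application/fhir+json",
    "Prefer": "respond-async",             # required for bulk export
}

# Kick off a group-level export of selected resource types.
kickoff = requests.get(
    f"{BASE}/Group/example-cohort/$export",  # hypothetical group ID
    params={"_type": "Patient,Condition,DocumentReference"},
    headers=HEADERS,
)
status_url = kickoff.headers["Content-Location"]

# Poll until the server reports the export is complete (HTTP 200).
while True:
    status = requests.get(
        status_url, headers={"Authorization": HEADERS["Authorization"]}
    )
    if status.status_code == 200:
        break
    time.sleep(int(status.headers.get("Retry-After", 10)))

# The completed manifest lists NDJSON file URLs for download/processing.
for item in status.json()["output"]:
    print(item["type"], item["url"])
```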

RESULTS: Cumulus relies on containerized, cloud-hosted software installed within a healthcare organization's security envelope. Cumulus accesses EHR data via the Bulk FHIR interface and streamlines automated processing and sharing. The modular design enables use of the latest AI and natural language processing tools and supports provider autonomy and administrative simplicity. In an initial test, Cumulus was deployed across 5 healthcare systems, each partnered with public health. Cumulus output is patient counts, which were aggregated into a table stratified by variables of interest to enable population health studies. All code is available as open source. A policy stipulating that only aggregate data leave the institution greatly facilitated data sharing agreements.

DISCUSSION AND CONCLUSION: Cumulus addresses barriers to data sharing based on (1) federally required support for standard APIs, (2) increasing use of cloud computing, and (3) advances in AI. There is potential for scalability to support learning across myriad network configurations and use cases.}, } @article {pmid38859877, year = {2024}, author = {Yang, T and Du, Y and Sun, M and Meng, J and Li, Y}, title = {Risk Management for Whole-Process Safe Disposal of Medical Waste: Progress and Challenges.}, journal = {Risk management and healthcare policy}, volume = {17}, number = {}, pages = {1503-1522}, pmid = {38859877}, issn = {1179-1594}, abstract = {Over the past decade, the global outbreaks of SARS, influenza A (H1N1), COVID-19, and other major infectious diseases have exposed the insufficient capacity for emergency disposal of medical waste in numerous countries and regions. Particularly during epidemics of major infectious diseases, medical waste exhibits new characteristics such as accelerated growth rate, heightened risk level, and more stringent disposal requirements. Consequently, there is an urgent need for advanced theoretical approaches that can perceive, predict, evaluate, and control risks associated with safe disposal throughout the entire process in a timely, accurate, efficient, and comprehensive manner. This article provides a systematic review of relevant research on the collection, storage, transportation, and disposal of medical waste throughout its entirety to illustrate the current state of safe disposal practices. Building upon this foundation and leveraging emerging information technologies like the Internet of Things (IoT), cloud computing, big data analytics, and artificial intelligence (AI), we deeply contemplate future research directions with an aim to minimize risks across all stages of medical waste disposal while offering valuable references and decision support to further advance safe disposal practices.}, } @article {pmid38855254, year = {2024}, author = {Ullah, S and Ou, J and Xie, Y and Tian, W}, title = {Facial expression recognition (FER) survey: a vision, architectural elements, and future directions.}, journal = {PeerJ. Computer science}, volume = {10}, number = {}, pages = {e2024}, pmid = {38855254}, issn = {2376-5992}, abstract = {With the cutting-edge advancements in computer vision, facial expression recognition (FER) is an active research area due to its broad practical applications. It has been utilized in various fields, including education, advertising and marketing, entertainment and gaming, health, and transportation. Facial expression recognition-based systems are rapidly evolving due to new challenges, and significant research studies have been conducted on both basic and compound facial expressions of emotions; however, measuring emotions is challenging. Fueled by recent advancements and challenges to FER systems, in this article we discuss the basics of FER and its architectural elements, FER applications and use cases, leading global FER companies, and the interconnection between FER, the Internet of Things (IoT), and cloud computing; we summarize the open challenges facing FER technologies in depth and outline future directions, following the Preferred Reporting Items for Systematic Reviews and Meta-Analyses (PRISMA) method. In the end, the conclusion and future thoughts are discussed.
By addressing the challenges identified in this research study and pursuing the outlined future directions, researchers can revolutionize the discipline of facial expression recognition.}, } @article {pmid38854703, year = {2023}, author = {Aman, SS and N'guessan, BG and Agbo, DDA and Kone, T}, title = {Search engine Performance optimization: methods and techniques.}, journal = {F1000Research}, volume = {12}, number = {}, pages = {1317}, pmid = {38854703}, issn = {2046-1402}, abstract = {BACKGROUND: With the rapid advancement of information technology, search engine optimisation (SEO) has become crucial for enhancing the visibility and relevance of online content. In this context, the use of cloud platforms like Microsoft Azure is being explored to bolster SEO capabilities.

METHODS: This scientific article offers an in-depth study of search engine optimisation. It explores the different methods and techniques used to improve the performance and efficiency of a search engine, focusing on key aspects such as result relevance, search speed and user experience. The article also presents case studies and concrete examples to illustrate the practical application of optimisation techniques.

RESULTS: The results demonstrate the importance of optimisation in delivering high quality search results and meeting the increasing demands of users.

CONCLUSIONS: The article addresses the enhancement of search engines through the Microsoft Azure infrastructure and its associated components. It highlights methods such as indexing, semantic analysis, parallel searches, and caching to strengthen the relevance of results, speed up searches, and optimise the user experience. Following the application of these methods, a marked improvement was observed in these areas, thereby showcasing the capability of Microsoft Azure in enhancing search engines. The study sheds light on the implementation and analysis of these Azure-focused techniques, introduces a methodology for assessing their efficacy, and details the specific benefits of each method. Looking forward, the article suggests integrating artificial intelligence to elevate the relevance of results, venturing into other cloud infrastructures to boost performance, and evaluating these methods in specific scenarios, such as multimedia information search. In summary, with Microsoft Azure, the enhancement of search engines appears promising, with increased relevance and a heightened user experience in a rapidly evolving sector.}, } @article {pmid38844552, year = {2024}, author = {Hie, BL and Kim, S and Rando, TA and Bryson, B and Berger, B}, title = {Scanorama: integrating large and diverse single-cell transcriptomic datasets.}, journal = {Nature protocols}, volume = {19}, number = {8}, pages = {2283-2297}, pmid = {38844552}, issn = {1750-2799}, support = {R01 AG068667/AG/NIA NIH HHS/United States ; R35 GM141861/GM/NIGMS NIH HHS/United States ; }, mesh = {*Single-Cell Analysis/methods ; *Transcriptome ; Sequence Analysis, RNA/methods ; Software ; Computational Biology/methods ; Gene Expression Profiling/methods ; Humans ; RNA-Seq/methods ; }, abstract = {Merging diverse single-cell RNA sequencing (scRNA-seq) data from numerous experiments, laboratories and technologies can uncover important biological insights. Nonetheless, integrating scRNA-seq data encounters special challenges when the datasets are composed of diverse cell type compositions. Scanorama offers a robust solution for improving the quality and interpretation of heterogeneous scRNA-seq data by effectively merging information from diverse sources. Scanorama is designed to address the technical variation introduced by differences in sample preparation, sequencing depth and experimental batches that can confound the analysis of multiple scRNA-seq datasets. Here we provide a detailed protocol for using Scanorama within a Scanpy-based single-cell analysis workflow coupled with Google Colaboratory, a cloud-based free Jupyter notebook environment service. The protocol involves Scanorama integration, a process that typically spans 0.5-3 h. Scanorama integration requires a basic understanding of cellular biology, transcriptomic technologies and bioinformatics. 
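Within a Scanpy-based workflow like the one this protocol describes, the integration step typically reduces to a single Scanorama call; a minimal sketch assuming a list of preprocessed AnnData objects, with hypothetical file names:

```python
# Minimal sketch of the Scanorama integration step inside a Scanpy
# workflow, assuming each AnnData is already normalized, log-transformed,
# and subset to shared highly variable genes. File names are hypothetical.
import anndata
import scanpy as sc
import scanorama

adatas = [
    sc.read_h5ad("batch1.h5ad"),  # hypothetical input files
    sc.read_h5ad("batch2.h5ad"),
]

# In-place integration: adds a low-dimensional, batch-corrected
# embedding to each AnnData under .obsm["X_scanorama"].
scanorama.integrate_scanpy(adatas, dimred=50)

# Concatenate and continue a standard Scanpy analysis on the embedding.
adata = anndata.concat(adatas)
sc.pp.neighbors(adata, use_rep="X_scanorama")
sc.tl.umap(adata)
sc.tl.leiden(adata)
```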
Our protocol and new Scanorama-Colaboratory resource should make scRNA-seq integration more widely accessible to researchers.}, } @article {pmid38839812, year = {2024}, author = {Zheng, P and Yang, J and Lou, J and Wang, B}, title = {Design and application of virtual simulation teaching platform for intelligent manufacturing.}, journal = {Scientific reports}, volume = {14}, number = {1}, pages = {12895}, pmid = {38839812}, issn = {2045-2322}, support = {ZJXF2022126//the Special Project of Scientific Research and Development Center of Higher Education Institutions, Ministry of Education of the People's Republic of China/ ; }, abstract = {Practical teaching in intelligent manufacturing majors faces a shortage of equipment and teachers alongside problems such as high equipment investment, high material loss, high teaching risk, and the difficulty of implementing internships, observing production, and reproducing results. Taking the electrical automation technology, mechatronics technology and industrial robotics technology majors as examples, we design and establish a virtual simulation teaching platform for intelligent manufacturing majors using a cloud computing platform, edge computing technology, and terminal equipment in synergy. The platform includes six major virtual simulation modules: electrician electronics and PLC control, virtual-real combination of typical intelligent manufacturing production lines, a dual-axis collaborative robotics workstation, digital twin simulation, virtual disassembly of industrial robots, and virtual simulation of a magnetic yoke axis flexible production line. The platform covers the virtual simulation teaching content of basic principle experiments, advanced application experiments, and advanced integration experiments in intelligent manufacturing majors. To test the effectiveness of this virtual simulation platform for practical engineering teaching, we organized a teaching practice activity involving 246 students from two parallel classes in three different majors. Through a one-year teaching application, we analyzed data on the grades of 7 core courses across the three majors in one academic year, the proportion of participation in competitions and innovative activities, the number of awards and professional qualification certificates, and subjective questionnaires from the testers. The analysis shows that learners who used the proposed virtual simulation teaching platform for practical teaching outperformed learners under the traditional teaching method in academic performance, participation in competitions and innovative activities, and awards and certificates, by more than 13%, 37%, 36%, 27% and 22%, respectively.
Therefore, the virtual simulation teaching platform for intelligent manufacturing established in this paper is clearly superior in solving the "three highs and three difficulties" problem in practical engineering teaching. According to questionnaire feedback from the testers, the platform can effectively alleviate the shortage of practical training equipment, stimulate interest in learning, and help broaden and improve learners' knowledge systems.}, } @article {pmid38838394, year = {2024}, author = {Lai, Q and Guo, S}, title = {Heterogeneous coexisting attractors, large-scale amplitude control and finite-time synchronization of central cyclic memristive neural networks.}, journal = {Neural networks : the official journal of the International Neural Network Society}, volume = {178}, number = {}, pages = {106412}, doi = {10.1016/j.neunet.2024.106412}, pmid = {38838394}, issn = {1879-2782}, mesh = {*Neural Networks, Computer ; *Nonlinear Dynamics ; Computer Simulation ; Artificial Intelligence ; Algorithms ; Humans ; }, abstract = {Memristors are of great theoretical and practical significance for the study of chaotic dynamics in brain-like neural networks due to their excellent physical properties, such as brain-synapse-like memorability and nonlinearity, and they are especially crucial for the advancement of AI big models, cloud computing, and intelligent systems in the artificial intelligence field. In this paper, we introduce memristors as self-connecting synapses into a four-dimensional Hopfield neural network, constructing a central cyclic memristive neural network (CCMNN) and achieving its effective control. The model adopts a central loop topology and exhibits a variety of complex dynamic behaviors such as chaos, bifurcation, and homogeneous and heterogeneous coexisting attractors. The complex dynamic behaviors of the CCMNN are investigated in depth numerically by equilibrium point stability analysis as well as phase trajectory maps, bifurcation maps, time-domain maps, and Lyapunov exponents. It is found that with the variation of the internal parameters of the memristor, asymmetric heterogeneous attractor coexistence phenomena appear under different initial conditions, including the multi-stable coexistence behaviors of periodic-periodic, periodic-stable point, periodic-chaotic, and stable point-chaotic attractors. In addition, by adjusting the structural parameters, a wide range of amplitude control can be realized without changing the chaotic state of the system. Finally, based on the CCMNN model, an adaptive synchronization controller is designed to achieve finite-time synchronization control, and its application prospects in simple secure communication are discussed.
A microcontroller-based hardware circuit is implemented and NIST tests are conducted to verify the correctness of the numerical results and theoretical analysis.}, } @article {pmid38837943, year = {2024}, author = {Oliva, A and Kaphle, A and Reguant, R and Sng, LMF and Twine, NA and Malakar, Y and Wickramarachchi, A and Keller, M and Ranbaduge, T and Chan, EKF and Breen, J and Buckberry, S and Guennewig, B and Haas, M and Brown, A and Cowley, MJ and Thorne, N and Jain, Y and Bauer, DC}, title = {Future-proofing genomic data and consent management: a comprehensive review of technology innovations.}, journal = {GigaScience}, volume = {13}, number = {}, pages = {}, pmid = {38837943}, issn = {2047-217X}, mesh = {Humans ; *Genomics/methods/ethics ; Computer Security ; Cloud Computing ; Informed Consent ; }, abstract = {Genomic information is increasingly used to inform medical treatments and manage future disease risks. However, any personal and societal gains must be carefully balanced against the risk to individuals contributing their genomic data. Expanding our understanding of actionable genomic insights requires researchers to access large global datasets to capture the complexity of genomic contribution to diseases. Similarly, clinicians need efficient access to a patient's genome as well as population-representative historical records for evidence-based decisions. Both researchers and clinicians hence rely on participants to consent to the use of their genomic data, which in turn requires trust in the professional and ethical handling of this information. Here, we review existing and emerging solutions for secure and effective genomic information management, including the storage, encryption, consent, and authorization needed to build participant trust. We discuss recent innovations in cloud computing, quantum-computing-proof encryption, and self-sovereign identity. These innovations can augment key developments from within the genomics community, notably GA4GH Passports and the Crypt4GH file container standard. We also explore how decentralized storage as well as the digital consenting process can offer culturally acceptable processes to encourage data contributions from ethnic minorities. We conclude that the individual and their right to self-determination need to be put at the center of any genomics framework, because only on an individual level can the received benefits be accurately balanced against the risk of exposing private information.}, } @article {pmid38834903, year = {2025}, author = {Peter, R and Moreira, S and Tagliabue, E and Hillenbrand, M and Nunes, RG and Mathis-Ullrich, F}, title = {Stereo reconstruction from microscopic images for computer-assisted ophthalmic surgery.}, journal = {International journal of computer assisted radiology and surgery}, volume = {20}, number = {3}, pages = {605-612}, pmid = {38834903}, issn = {1861-6429}, mesh = {*Surgery, Computer-Assisted/methods ; Swine ; *Imaging, Three-Dimensional/methods ; Animals ; *Ophthalmologic Surgical Procedures/methods ; *Microscopy/methods ; Phantoms, Imaging ; Cornea/anatomy & histology/surgery ; Humans ; *Image Processing, Computer-Assisted/methods ; }, abstract = {PURPOSE: This work presents a novel platform for stereo reconstruction in anterior segment ophthalmic surgery to enable enhanced scene understanding, especially depth perception, for advanced computer-assisted eye surgery, by effectively addressing the lack of texture and corneal distortion artifacts in the surgical scene.

METHODS: The proposed platform for stereo reconstruction uses a two-step approach: first generating a sparse 3D point cloud from microscopic images, then deriving a dense 3D representation by fitting surfaces onto the point cloud while considering geometrical priors of the eye anatomy. We incorporate a pre-processing step to rectify distortion artifacts induced by the cornea's high refractive power, achieved by aligning a 3D phenotypical cornea geometry model to the images and computing a distortion map using ray tracing.
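As an illustrative stand-in for fitting eye-anatomy priors to the sparse reconstruction, a least-squares sphere fit to a 3D point cloud can be written as a small linear system; this sketch is not the authors' surface model.

```python
# Illustrative sketch: algebraic least-squares sphere fit to a 3D point
# cloud, a simple stand-in for fitting geometric eye-anatomy priors.
import numpy as np

def fit_sphere(points: np.ndarray):
    """Fit a sphere to an (N, 3) point array via linear least squares."""
    # |p - c|^2 = r^2  rearranges to  p.p = 2c.p + (r^2 - c.c),
    # which is linear in the unknowns [c, r^2 - c.c].
    A = np.hstack([2 * points, np.ones((len(points), 1))])
    b = (points ** 2).sum(axis=1)
    sol, *_ = np.linalg.lstsq(A, b, rcond=None)
    center = sol[:3]
    radius = np.sqrt(sol[3] + center @ center)
    return center, radius

# Synthetic noisy points on a 12 mm sphere (roughly eye-sized).
rng = np.random.default_rng(2)
dirs = rng.normal(size=(500, 3))
dirs /= np.linalg.norm(dirs, axis=1, keepdims=True)
cloud = 12.0 * dirs + np.array([1.0, -2.0, 30.0]) + rng.normal(0, 0.05, (500, 3))

center, radius = fit_sphere(cloud)
print("center:", center.round(2), "radius:", round(radius, 3))
```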

RESULTS: The accuracy of 3D reconstruction is evaluated on stereo microscopic images of ex vivo porcine eyes, rigid phantom eyes, and synthetic photo-realistic images. The results demonstrate the potential of the proposed platform to enhance scene understanding via an accurate 3D representation of the eye and enable the estimation of instrument-to-layer distances in porcine eyes with a mean average error of 190 μm, comparable to the scale of surgeons' hand tremor.

CONCLUSION: This work marks a significant advancement in stereo reconstruction for ophthalmic surgery by addressing corneal distortions, a previously often overlooked aspect in such surgical scenarios. This could improve surgical outcomes by allowing for intra-operative computer assistance, e.g., in the form of virtual distance sensors.}, } @article {pmid38833448, year = {2024}, author = {H S, M and Gupta, P}, title = {Federated learning inspired Antlion based orchestration for Edge computing environment.}, journal = {PloS one}, volume = {19}, number = {6}, pages = {e0304067}, pmid = {38833448}, issn = {1932-6203}, mesh = {*Neural Networks, Computer ; *Algorithms ; Fuzzy Logic ; Internet of Things ; Cloud Computing ; }, abstract = {Edge computing is a scalable, modern, and distributed computing architecture that brings computational workloads closer to smart gateways or Edge devices. This computing model delivers IoT (Internet of Things) computations and processes IoT requests from the Edge of the network. In a diverse and independent environment like Fog-Edge, resource management is a critical issue. Hence, scheduling is a vital process for enhancing efficiency and allocating resources properly to tasks. The manuscript proposes an Artificial Neural Network (ANN) inspired Antlion algorithm for task orchestration in Edge environments. Its aim is to enhance resource utilization and reduce energy consumption. Comparative analysis with different algorithms shows that the proposed algorithm balances the load on the Edge layer, which results in lower load on the cloud, improves power consumption, CPU utilization, and network utilization, and reduces the average waiting time for requests. The proposed model is tested for a healthcare application in an Edge computing environment. The evaluation shows that the proposed algorithm outperforms existing fuzzy logic algorithms. The performance of the ANN inspired Antlion based orchestration approach is evaluated using the performance metrics of power consumption, CPU utilization, network utilization, and average waiting time for requests. It outperforms the existing fuzzy logic and round-robin algorithms. The proposed technique achieves an average cloud energy consumption improvement of 95.94%, an average Edge energy consumption improvement of 16.79%, a 19.85% improvement in average CPU utilization in the Edge computing environment, a 10.64% improvement in average CPU utilization in the cloud environment, and a 23.33% improvement in average network utilization, while the average waiting time decreases by 96% compared to fuzzy logic and 1.4% compared to round-robin.}, } @article {pmid38832828, year = {2024}, author = {Herre, C and Ho, A and Eisenbraun, B and Vincent, J and Nicholson, T and Boutsioukis, G and Meyer, PA and Ottaviano, M and Krause, KL and Key, J and Sliz, P}, title = {Introduction of the Capsules environment to support further growth of the SBGrid structural biology software collection.}, journal = {Acta crystallographica.
Section D, Structural biology}, volume = {80}, number = {Pt 6}, pages = {439-450}, pmid = {38832828}, issn = {2059-7983}, support = {R25 GM151273/GM/NIGMS NIH HHS/United States ; 1R25GM151273//National Institutes of Health, National Institute of General Medical Sciences/ ; 21-UOO-003-CSG//Royal Society Te Apārangi/ ; }, mesh = {*Software ; Computational Biology/methods ; }, abstract = {The expansive scientific software ecosystem, characterized by millions of titles across various platforms and formats, poses significant challenges in maintaining reproducibility and provenance in scientific research. The diversity of independently developed applications, evolving versions and heterogeneous components highlights the need for rigorous methodologies to navigate these complexities. In response to these challenges, the SBGrid team builds, installs and configures over 530 specialized software applications for use in the on-premises and cloud-based computing environments of SBGrid Consortium members. To address the intricacies of supporting this diverse application collection, the team has developed the Capsule Software Execution Environment, generally referred to as Capsules. Capsules rely on a collection of programmatically generated bash scripts that work together to isolate the runtime environment of one application from all other applications, thereby providing a transparent cross-platform solution without requiring specialized tools or elevated account privileges for researchers. Capsules facilitate modular, secure software distribution while maintaining a centralized, conflict-free environment. The SBGrid platform, which combines Capsules with the SBGrid collection of structural biology applications, aligns with FAIR goals by enhancing the findability, accessibility, interoperability and reusability of scientific software, ensuring seamless functionality across diverse computing environments. Its adaptability enables application beyond structural biology into other scientific fields.}, } @article {pmid38829364, year = {2024}, author = {Rathinam, R and Sivakumar, P and Sigamani, S and Kothandaraman, I}, title = {SJFO: Sail Jelly Fish Optimization enabled VM migration with DRNN-based prediction for load balancing in cloud computing.}, journal = {Network (Bristol, England)}, volume = {35}, number = {4}, pages = {403-428}, doi = {10.1080/0954898X.2024.2359609}, pmid = {38829364}, issn = {1361-6536}, mesh = {*Neural Networks, Computer ; *Cloud Computing ; Animals ; Humans ; Algorithms ; }, abstract = {The dynamic workload is evenly distributed among all nodes, such as hosts or VMs, using balancing methods. Load Balancing as a Service (LBaaS) is another name for load balancing in the cloud. In this research work, the load is balanced by the application of Virtual Machine (VM) migration carried out by the proposed Sail Jelly Fish Optimization (SJFO). The SJFO is formed by combining the Sail Fish Optimizer (SFO) and the Jellyfish Search (JS) optimizer. In the Cloud model, many Physical Machines (PMs) are present, each comprising many VMs. Each VM has many tasks, and these tasks depend on various parameters like Central Processing Unit (CPU), memory, Million Instructions per Second (MIPS), capacity, total number of processing entities, as well as bandwidth. Here, the load is predicted by a Deep Recurrent Neural Network (DRNN), and this predicted load is compared with a threshold value, where VM migration is done based on predicted values.
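The predict-then-migrate rule just described reduces to a few lines; in the sketch below, the moving-average predictor is a trivial stand-in for the paper's DRNN, and the threshold and VM names are invented for illustration.

```python
# Sketch of a predict-then-migrate rule: predict each VM's load and
# flag it for migration when the prediction crosses a threshold. The
# moving-average "predictor" stands in for the paper's DRNN.
from collections import deque

THRESHOLD = 0.8  # fraction of PM capacity; hypothetical value

class LoadPredictor:
    """Placeholder predictor: next load as a short moving average."""
    def __init__(self, window: int = 5):
        self.history = deque(maxlen=window)

    def update_and_predict(self, observed_load: float) -> float:
        self.history.append(observed_load)
        return sum(self.history) / len(self.history)

def migration_decisions(vm_loads: dict, predictors: dict) -> list:
    """Return the VMs whose predicted load exceeds the threshold."""
    to_migrate = []
    for vm, load in vm_loads.items():
        predicted = predictors[vm].update_and_predict(load)
        if predicted > THRESHOLD:
            to_migrate.append(vm)  # candidate for a new placement search
    return to_migrate

predictors = {vm: LoadPredictor() for vm in ("vm1", "vm2", "vm3")}
for tick in [{"vm1": 0.4, "vm2": 0.90, "vm3": 0.70},
             {"vm1": 0.5, "vm2": 0.95, "vm3": 0.85}]:
    print("migrate:", migration_decisions(tick, predictors))
```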
Furthermore, the performance of SJFO-VM is analysed using metrics like capacity, load, and resource utilization. The proposed method shows better performance, with a higher capacity of 0.598, a lower load of 0.089, and lower resource utilization of 0.257.}, } @article {pmid38828387, year = {2024}, author = {McCormick, I and Butcher, R and Ramke, J and Bolster, NM and Limburg, H and Chroston, H and Bastawrous, A and Burton, MJ and Mactaggart, I}, title = {The Rapid Assessment of Avoidable Blindness survey: Review of the methodology and protocol for the seventh version (RAAB7).}, journal = {Wellcome open research}, volume = {9}, number = {}, pages = {133}, pmid = {38828387}, issn = {2398-502X}, abstract = {The Rapid Assessment of Avoidable Blindness (RAAB) is a population-based cross-sectional survey methodology used to collect data on the prevalence of vision impairment and its causes and eye care service indicators among the population 50 years and older. RAAB has been used for over 20 years, with modifications to the protocol over time reflected in changing version numbers; this paper describes the latest version of the methodology, RAAB7. RAAB7 is a collaborative project between the International Centre for Eye Health and Peek Vision with guidance from a steering group of global eye health stakeholders. We have fully digitised RAAB, allowing for fast, accurate and secure data collection. A bespoke Android mobile application automatically synchronises data to a secure Amazon Web Services virtual private cloud when devices are online so users can monitor data collection in real time. Vision is screened using Peek Vision's digital visual acuity test for mobile devices, and uncorrected, corrected and pinhole visual acuity are collected. An optional module on Disability is available. We have rebuilt the RAAB data repository as the end point of RAAB7's digital data workflow, including a front-end website to access the past 20 years of RAAB surveys worldwide. This website (https://www.raab.world) hosts open access RAAB data to support the advocacy and research efforts of the global eye health community. Active research sub-projects are finalising three new components in 2024-2025: 1) near vision screening to address data gaps on near vision impairment and effective refractive error coverage; 2) an optional Health Economics module to assess the affordability of eye care services and productivity losses associated with vision impairment; 3) an optional Health Systems data collection module to support RAAB's primary aim to inform eye health service planning by supporting users to integrate eye care facility data with population data.}, } @article {pmid38828338, year = {2024}, author = {Zhu, X and Peng, X}, title = {Strategic assessment model of smart stadiums based on genetic algorithms and literature visualization analysis: A case study from Chengdu, China.}, journal = {Heliyon}, volume = {10}, number = {11}, pages = {e31759}, pmid = {38828338}, issn = {2405-8440}, abstract = {This paper leverages CiteSpace and VOSviewer software to perform a comprehensive bibliometric analysis of a corpus of 384 references related to smart sports venues, spanning from 1998 to 2022. The analysis encompasses various facets, including author network analysis, institutional network analysis, temporal mapping, keyword clustering, and co-citation network analysis.
Moreover, this paper constructs a smart stadium strategic assessment model (SSSAM), driven by genetic algorithms (GA), to compensate for the confusion and aimlessness of existing assessment approaches. Our findings indicate exponential year-over-year growth in publications on smart sports venues. Arizona State University emerges as the institution with the highest number of collaborative publications, Energy and Buildings is the journal with the most documents, and Wang X stands out as the scholar with the most substantial contribution to the field. In scrutinizing the betweenness centrality indicators, a paradigm shift in research hotspots becomes evident, from intelligent software to the domains of the Internet of Things (IoT), intelligent services, and artificial intelligence (AI). The SSSAM model, based on artificial neural networks (ANN) and GA, reached similar conclusions through a case study of the International University Sports Federation (FISU): building information modeling (BIM), cloud computing and the artificial intelligence Internet of Things (AIoT) are expected to develop in the future. Three key themes developed over time. Finally, a comprehensive knowledge system with common references and future hot spots is proposed.}, } @article {pmid38827487, year = {2024}, author = {Nisanova, A and Yavary, A and Deaner, J and Ali, FS and Gogte, P and Kaplan, R and Chen, KC and Nudleman, E and Grewal, D and Gupta, M and Wolfe, J and Klufas, M and Yiu, G and Soltani, I and Emami-Naeini, P}, title = {Performance of Automated Machine Learning in Predicting Outcomes of Pneumatic Retinopexy.}, journal = {Ophthalmology science}, volume = {4}, number = {5}, pages = {100470}, pmid = {38827487}, issn = {2666-9145}, abstract = {PURPOSE: Automated machine learning (AutoML) has emerged as a novel tool for medical professionals lacking coding experience, enabling them to develop predictive models for treatment outcomes. This study evaluated the performance of AutoML tools in developing models predicting the success of pneumatic retinopexy (PR) in the treatment of rhegmatogenous retinal detachment (RRD). These models were then compared with custom models created by machine learning (ML) experts.

DESIGN: Retrospective multicenter study.

PARTICIPANTS: Five hundred thirty-nine consecutive patients with primary RRD who underwent PR performed by a vitreoretinal fellow at 6 training hospitals between 2002 and 2022.

METHODS: We used 2 AutoML platforms: MATLAB Classification Learner and Google Cloud AutoML. Additional models were developed by computer scientists. We included patient demographics and baseline characteristics, including lens and macula status, RRD size, number and location of breaks, presence of vitreous hemorrhage and lattice degeneration, and physicians' experience. The dataset was split into a training (n = 483) and test set (n = 56). The training set, with a 2:1 success-to-failure ratio, was used to train the MATLAB models. Because Google Cloud AutoML requires a minimum of 1000 samples, the training set was tripled to create a new set with 1449 datapoints. Additionally, balanced datasets with a 1:1 success-to-failure ratio were created using Python.
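The balanced 1:1 training sets the authors describe creating in Python can be produced by undersampling the majority class; a minimal sketch with pandas follows, where the column name is a hypothetical placeholder.

```python
# Minimal sketch of building a 1:1 success-to-failure training set by
# undersampling the majority class, as the methods describe doing in
# Python. The column name "success" is a hypothetical placeholder.
import pandas as pd

def balance_1_to_1(df: pd.DataFrame, label: str = "success",
                   seed: int = 42) -> pd.DataFrame:
    counts = df[label].value_counts()
    minority = counts.idxmin()
    n = counts.min()
    parts = [
        df[df[label] == minority],
        df[df[label] != minority].sample(n=n, random_state=seed),
    ]
    return pd.concat(parts).sample(frac=1, random_state=seed)  # shuffle

# Toy frame with the study's 2:1 success-to-failure training ratio.
toy = pd.DataFrame({"success": [1] * 322 + [0] * 161, "feature": 0})
balanced = balance_1_to_1(toy)
print(balanced["success"].value_counts().to_dict())  # 161 of each class
```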

MAIN OUTCOME MEASURES: Single-procedure anatomic success rate, as predicted by the ML models. F2 scores and area under the receiver operating characteristic curve (AUROC) were used as primary metrics to compare models.
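Both metrics are easy to reproduce with scikit-learn; the sketch below uses made-up predictions (not study data) to show the distinction: the F2 score weights recall four times as heavily as precision, while AUROC is computed from continuous scores.

```python
# Computing the two comparison metrics on toy data.
from sklearn.metrics import fbeta_score, roc_auc_score

y_true = [1, 1, 1, 0, 0, 1, 0, 1]                    # 1 = anatomic success
y_score = [0.9, 0.8, 0.4, 0.3, 0.6, 0.7, 0.2, 0.55]  # model probabilities
y_pred = [1 if s >= 0.5 else 0 for s in y_score]     # thresholded labels

f2 = fbeta_score(y_true, y_pred, beta=2)   # recall-weighted F-score
auroc = roc_auc_score(y_true, y_score)     # threshold-independent ranking
print(f"F2 = {f2:.2f}, AUROC = {auroc:.2f}")
```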

RESULTS: The best-performing AutoML model (F2 score: 0.85; AUROC: 0.90; MATLAB) showed performance comparable to the custom model (0.92, 0.86) when trained on the balanced datasets. However, training the AutoML model with imbalanced data yielded a misleadingly high AUROC (0.81) despite a low F2 score (0.2) and sensitivity (0.17).

CONCLUSIONS: We demonstrated the feasibility of using AutoML as an accessible tool for medical professionals to develop models from clinical data. Such models can ultimately aid in clinical decision-making, contributing to better patient outcomes. However, results can be misleading or unreliable if these tools are used naively. Limitations exist, particularly if datasets contain missing variables or are highly imbalanced. Proper model selection and data preprocessing can improve the reliability of AutoML tools.

FINANCIAL DISCLOSURES: Proprietary or commercial disclosure may be found in the Footnotes and Disclosures at the end of this article.}, } @article {pmid38826407, year = {2024}, author = {Rodriguez, A and Kim, Y and Nandi, TN and Keat, K and Kumar, R and Bhukar, R and Conery, M and Liu, M and Hessington, J and Maheshwari, K and Schmidt, D and , and Begoli, E and Tourassi, G and Muralidhar, S and Natarajan, P and Voight, BF and Cho, K and Gaziano, JM and Damrauer, SM and Liao, KP and Zhou, W and Huffman, JE and Verma, A and Madduri, RK}, title = {Accelerating Genome- and Phenome-Wide Association Studies using GPUs - A case study using data from the Million Veteran Program.}, journal = {bioRxiv : the preprint server for biology}, volume = {}, number = {}, pages = {}, pmid = {38826407}, issn = {2692-8205}, support = {I01 BX004821/BX/BLRD VA/United States ; P30 AR072577/AR/NIAMS NIH HHS/United States ; I01 CX001849/CX/CSRD VA/United States ; IK2 CX001780/CX/CSRD VA/United States ; R01 AG067025/AG/NIA NIH HHS/United States ; I01 CX001737/CX/CSRD VA/United States ; K99 HG012222/HG/NHGRI NIH HHS/United States ; R01 AG065582/AG/NIA NIH HHS/United States ; I01 BX005831/BX/BLRD VA/United States ; R01 GM138597/GM/NIGMS NIH HHS/United States ; K08 MH122911/MH/NIMH NIH HHS/United States ; UM1 DK126194/DK/NIDDK NIH HHS/United States ; T32 AA028259/AA/NIAAA NIH HHS/United States ; R01 LM010685/LM/NLM NIH HHS/United States ; I01 BX004189/BX/BLRD VA/United States ; }, abstract = {The expansion of biobanks has significantly propelled genomic discoveries, yet the sheer scale of data within these repositories poses formidable computational hurdles, particularly in handling extensive matrix operations required by prevailing statistical frameworks. In this work, we introduce computational optimizations to the SAIGE (Scalable and Accurate Implementation of Generalized Mixed Model) algorithm, notably employing a GPU-based distributed computing approach to tackle these challenges. We applied these optimizations to conduct a large-scale genome-wide association study (GWAS) across 2,068 phenotypes derived from electronic health records of 635,969 diverse participants from the Veterans Affairs (VA) Million Veteran Program (MVP). Our strategies enabled scaling up the analysis to over 6,000 nodes on the Department of Energy (DOE) Oak Ridge Leadership Computing Facility (OLCF) Summit High-Performance Computer (HPC), resulting in a 20-fold acceleration compared to the baseline model. We also provide a Docker container with our optimizations that was successfully used on multiple cloud infrastructures on UK Biobank and All of Us datasets, where we showed significant time and cost benefits over the baseline SAIGE model.}, } @article {pmid38826171, year = {2024}, author = {Lowndes, JS and Holder, AM and Markowitz, EH and Clatterbuck, C and Bradford, AL and Doering, K and Stevens, MH and Butland, S and Burke, D and Kross, S and Hollister, JW and Stawitz, C and Siple, MC and Rios, A and Welch, JN and Li, B and Nojavan, F and Davis, A and Steiner, E and London, JM and Fenwick, I and Hunzinger, A and Verstaen, J and Holmes, E and Virdi, M and Barrett, AP and Robinson, E}, title = {Shifting institutional culture to develop climate solutions with Open Science.}, journal = {Ecology and evolution}, volume = {14}, number = {6}, pages = {e11341}, pmid = {38826171}, issn = {2045-7758}, abstract = {To address our climate emergency, "we must rapidly, radically reshape society"-Johnson & Wilkinson, All We Can Save.
In science, reshaping requires formidable technical (cloud, coding, reproducibility) and cultural (mindsets, hybrid collaboration, inclusion) shifts. We are a group of cross-government and academic scientists who are exploring better ways of working and are not too entrenched in our bureaucracies to do better science, support colleagues, and change the culture at our organizations. We share much-needed success stories and actions for what we can all do to reshape science as part of the Open Science movement and 2023 Year of Open Science.}, } @article {pmid38813089, year = {2024}, author = {Mimar, S and Paul, AS and Lucarelli, N and Border, S and Naglah, A and Barisoni, L and Hodgin, J and Rosenberg, AZ and Clapp, W and Sarder, P}, title = {ComPRePS: An Automated Cloud-based Image Analysis tool to democratize AI in Digital Pathology.}, journal = {Proceedings of SPIE--the International Society for Optical Engineering}, volume = {12933}, number = {}, pages = {}, pmid = {38813089}, issn = {0277-786X}, support = {R21 DK128668/DK/NIDDK NIH HHS/United States ; R01 DK114485/DK/NIDDK NIH HHS/United States ; U01 DK133090/DK/NIDDK NIH HHS/United States ; R01 DK129541/DK/NIDDK NIH HHS/United States ; OT2 OD033753/OD/NIH HHS/United States ; }, abstract = {Artificial intelligence (AI) has extensive applications in a wide range of disciplines including healthcare and clinical practice. Advances in high-resolution whole-slide brightfield microscopy allow for the digitization of histologically stained tissue sections, producing gigapixel-scale whole-slide images (WSI). The significant improvement in computing and revolution of deep neural network (DNN)-based AI technologies over the last decade allow us to integrate massively parallelized computational power, cutting-edge AI algorithms, and big data storage, management, and processing. Applied to WSIs, AI has created opportunities for improved disease diagnostics and prognostics with the ultimate goal of enhancing precision medicine and resulting patient care. The National Institutes of Health (NIH) has recognized the importance of developing standardized principles for data management and discovery for the advancement of science and proposed the Findable, Accessible, Interoperable, Reusable (FAIR) Data Principles [1] with the goal of building a modernized biomedical data resource ecosystem to establish collaborative research communities. In line with this mission and to democratize AI-based image analysis in digital pathology, we propose ComPRePS: an end-to-end automated Computational Renal Pathology Suite, which combines massive scalability, on-demand cloud computing, and an easy-to-use web-based user interface for data upload, storage, management, slide-level visualization, and domain expert interaction.
Moreover, our platform is equipped with both in-house and collaborator-developed sophisticated AI algorithms in the back-end server for image analysis to identify clinically relevant micro-anatomic functional tissue units (FTU) and to extract image features.}, } @article {pmid38810758, year = {2024}, author = {Yu, J and Nie, S and Liu, W and Zhu, X and Sun, Z and Li, J and Wang, C and Xi, X and Fan, H}, title = {Mapping global mangrove canopy height by integrating Ice, Cloud, and Land Elevation Satellite-2 photon-counting LiDAR data with multi-source images.}, journal = {The Science of the total environment}, volume = {939}, number = {}, pages = {173487}, doi = {10.1016/j.scitotenv.2024.173487}, pmid = {38810758}, issn = {1879-1026}, abstract = {Large-scale and precise measurement of mangrove canopy height is crucial for understanding and evaluating wetland ecosystems' condition, health, and productivity. This study generates a global mangrove canopy height map with a 30 m resolution by integrating Ice, Cloud, and Land Elevation Satellite-2 (ICESat-2) photon-counting light detection and ranging (LiDAR) data with multi-source imagery. Initially, high-quality mangrove canopy height samples were extracted using meticulous processing and filtering of ICESat-2 data. Subsequently, mangrove canopy height models were established using the random forest (RF) algorithm, incorporating ICESat-2 canopy height samples, Sentinel-2 data, TanDEM-X DEM data and WorldClim data. Furthermore, a global 30 m mangrove canopy height map was generated utilizing the Google Earth Engine platform. Finally, the global map's accuracy was evaluated by comparing it with reference canopy heights derived from both space-borne and airborne LiDAR data. Results indicate that the global 30 m resolution mangrove height map is consistent with canopy heights obtained from space-borne (r = 0.88, Bias = -0.07 m, RMSE = 3.66 m, RMSE% = 29.86 %) and airborne LiDAR (r = 0.52, Bias = -1.08 m, RMSE = 3.39 m, RMSE% = 39.05 %). Additionally, our findings reveal that mangroves worldwide exhibit an average height of 12.65 m, with the tallest mangrove reaching a height of 44.94 m. These results demonstrate the feasibility and effectiveness of using ICESat-2 data integrated with multi-source imagery to generate a global mangrove canopy height map. This dataset offers reliable information that can significantly support government and organizational efforts to protect and conserve mangrove ecosystems.}, } @article {pmid38798429, year = {2024}, author = {Oh, S and Gravel-Pucillo, K and Ramos, M and Davis, S and Carey, V and Morgan, M and Waldron, L}, title = {AnVILWorkflow: A runnable workflow package for Cloud-implemented bioinformatics analysis pipelines.}, journal = {Research square}, volume = {}, number = {}, pages = {}, pmid = {38798429}, issn = {2693-5015}, support = {U24 HG010263/HG/NHGRI NIH HHS/United States ; }, abstract = {Advancements in sequencing technologies and the development of new data collection methods produce large volumes of biological data. The Genomic Data Science Analysis, Visualization, and Informatics Lab-space (AnVIL) provides a cloud-based platform for democratizing access to large-scale genomics data and analysis tools. However, utilizing the full capabilities of AnVIL can be challenging for researchers without extensive bioinformatics expertise, especially for executing complex workflows.
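For the random forest canopy-height model in the mangrove mapping study above, the core fitting step can be sketched with scikit-learn; every feature name and number below is a synthetic placeholder, and the published pipeline ran on Google Earth Engine rather than this stack.

```python
# Sketch of an RF canopy-height regression: ICESat-2 height samples as
# targets, multi-source predictors as features. All values are synthetic.
import numpy as np
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error

rng = np.random.default_rng(0)
n = 500
X = np.column_stack([
    rng.uniform(0.0, 0.4, n),  # e.g., Sentinel-2 red reflectance
    rng.uniform(0.1, 0.6, n),  # e.g., Sentinel-2 NIR reflectance
    rng.uniform(0.0, 30, n),   # e.g., TanDEM-X elevation (m)
    rng.uniform(20, 30, n),    # e.g., WorldClim mean temperature (deg C)
])
y = 5 + 40 * X[:, 1] + rng.normal(0, 2, n)  # synthetic canopy height (m)

X_tr, X_te, y_tr, y_te = train_test_split(X, y, random_state=0)
rf = RandomForestRegressor(n_estimators=200, random_state=0).fit(X_tr, y_tr)
rmse = mean_squared_error(y_te, rf.predict(X_te)) ** 0.5
print(f"RMSE = {rmse:.2f} m")
```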
Here we present the AnVILWorkflow R package, which enables the convenient execution of bioinformatics workflows hosted on AnVIL directly from an R environment. AnVILWorkflow simplifies the setup of the cloud computing environment, input data formatting, workflow submission, and retrieval of results through intuitive functions. We demonstrate the utility of AnVILWorkflow for three use cases: bulk RNA-seq analysis with Salmon, metagenomics analysis with bioBakery, and digital pathology image processing with PathML. The key features of AnVILWorkflow include user-friendly browsing of available data and workflows, seamless integration of R and non-R tools within a reproducible analysis pipeline, and accessibility to scalable computing resources without direct management overhead. While some limitations exist around workflow customization, AnVILWorkflow lowers the barrier to taking advantage of AnVIL's resources, especially for exploratory analyses or bulk processing with established workflows. This empowers a broader community of researchers to leverage the latest genomics tools and datasets using familiar R syntax. This package is distributed through the Bioconductor project (https://bioconductor.org/packages/AnVILWorkflow), and the source code is available through GitHub (https://github.com/shbrief/AnVILWorkflow).}, } @article {pmid38797827, year = {2024}, author = {Alrashdi, I}, title = {Fog-based deep learning framework for real-time pandemic screening in smart cities from multi-site tomographies.}, journal = {BMC medical imaging}, volume = {24}, number = {1}, pages = {123}, pmid = {38797827}, issn = {1471-2342}, support = {DGSSR-2023-02-02058//Graduate Studies and Scientific Research at Jouf University/ ; }, mesh = {Humans ; *Deep Learning ; *COVID-19 ; *Tomography, X-Ray Computed/methods ; *Pandemics ; SARS-CoV-2 ; Cities ; Internet of Things ; }, abstract = {The quick proliferation of pandemic diseases has been imposing many concerns on the international health infrastructure. To combat pandemic diseases in smart cities, Artificial Intelligence of Things (AIoT) technology, based on the integration of artificial intelligence (AI) with the Internet of Things (IoT), is commonly used to promote efficient control and diagnosis during the outbreak, thereby minimizing possible losses. However, the presence of multi-source institutional data remains one of the major challenges hindering the practical usage of AIoT solutions for pandemic disease diagnosis. This paper presents a novel framework that utilizes multi-site data fusion to boost the accuracy of pandemic disease diagnosis. In particular, we focus on a case study of COVID-19 lesion segmentation, a crucial task for understanding disease progression and optimizing treatment strategies. In this study, we propose a novel multi-decoder segmentation network for efficient segmentation of infections from cross-domain CT scans in smart cities. The multi-decoder segmentation network leverages data from heterogeneous domains and utilizes strong learning representations to accurately segment infections. Performance evaluation of the multi-decoder segmentation network was conducted on three publicly accessible datasets, demonstrating robust results with an average dice score of 89.9% and an average surface dice of 86.87%. To address scalability and latency issues associated with centralized cloud systems, fog computing (FC) emerges as a viable solution.
FC brings resources closer to the operator, offering low latency and energy-efficient data management and processing. In this context, we propose a unique FC technique called PANDFOG to deploy the multi-decoder segmentation network on edge nodes for practical and clinical applications of automated COVID-19 pneumonia analysis. The results of this study highlight the efficacy of the multi-decoder segmentation network in accurately segmenting infections from cross-domain CT scans. Moreover, the proposed PANDFOG system demonstrates the practical deployment of the multi-decoder segmentation network on edge nodes, providing real-time access to COVID-19 segmentation findings for improved patient monitoring and clinical decision-making.}, } @article {pmid38794890, year = {2024}, author = {Dos Santos, HCAS and Armellini, BRC and Naves, GL and Bueris, V and Moreno, ACR and Ferreira, RCC}, title = {Using "Adopt a Bacterium" as an e-learning tool for simultaneously teaching microbiology to different health-related university courses.}, journal = {FEMS microbiology letters}, volume = {371}, number = {}, pages = {}, doi = {10.1093/femsle/fnae033}, pmid = {38794890}, issn = {1574-6968}, support = {//Conselho Nacional de Desenvolvimento Científico e Tecnológico/ ; //Fundação de Amparo à Pesquisa do Estado de São Paulo/ ; }, mesh = {*Education, Distance/methods ; *Microbiology/education ; Humans ; Universities ; *COVID-19 ; SARS-CoV-2 ; Students ; Pandemics ; Computer-Assisted Instruction/methods ; }, abstract = {The COVID-19 pandemic has posed challenges for education, particularly in undergraduate teaching. In this study, we report on the experience of how a private university successfully addressed this challenge through an active methodology applied to a microbiology discipline offered remotely to students from various health-related courses (veterinary, physiotherapy, nursing, biomedicine, and nutrition). Remote teaching was combined with the "Adopt a Bacterium" methodology, implemented for the first time on Google Sites. The distance learning activity notably improved student participation in microbiology discussions, both through word cloud analysis and the richness of discourse measured by the Shannon index. Furthermore, feedback from students about the e-learning approach was highly positive, indicating its effectiveness in motivating and involving students in the learning process. The results also demonstrate that despite being offered simultaneously to students, the methodology allowed for the acquisition of specialized knowledge within each course and sparked student interest in various aspects of microbiology. In conclusion, the remote "Adopt a Bacterium" methodology facilitated knowledge sharing among undergraduate students from different health-related courses and represented a valuable resource in distance microbiology education.}, } @article {pmid38794107, year = {2024}, author = {Shaghaghi, N and Fazlollahi, F and Shrivastav, T and Graham, A and Mayer, J and Liu, B and Jiang, G and Govindaraju, N and Garg, S and Dunigan, K and Ferguson, P}, title = {DOxy: A Dissolved Oxygen Monitoring System.}, journal = {Sensors (Basel, Switzerland)}, volume = {24}, number = {10}, pages = {}, pmid = {38794107}, issn = {1424-8220}, abstract = {Dissolved Oxygen (DO) in water enables marine life. Measuring the prevalence of DO in a body of water is an important part of sustainability efforts because low oxygen levels are a primary indicator of contamination and distress in bodies of water. 
Therefore, aquariums and aquaculture operations of all types need near real-time dissolved oxygen monitoring, and they spend a lot of money purchasing and maintaining DO meters that are either expensive, inefficient, or manually operated; in the last case, they must also ensure that manual readings are taken frequently, which is time consuming. Hence, a cost-effective and sustainable automated Internet of Things (IoT) system for this task is necessary and long overdue. DOxy is such an IoT system, under research and development at Santa Clara University's Ethical, Pragmatic, and Intelligent Computing (EPIC) Laboratory, which utilizes cost-effective, accessible, and sustainable Sensing Units (SUs) to measure the dissolved oxygen levels present in bodies of water and send their readings to a web-based cloud infrastructure for storage, analysis, and visualization. DOxy's SUs are equipped with a high-sensitivity pulse oximeter meant for measuring dissolved oxygen levels in human blood, not water. Hence, a number of parallel readings of water samples were gathered by both the high-sensitivity pulse oximeter and a standard dissolved oxygen meter. Then, two approaches for relating the readings were investigated. In the first, various machine learning models were trained and tested to produce a dynamic mapping of sensor readings to actual DO values. In the second, curve-fitting models were used to produce a conversion formula usable in the DOxy SUs offline. Both proved successful in producing accurate results.}, } @article {pmid38794080, year = {2024}, author = {Kitsiou, A and Sideri, M and Pantelelis, M and Simou, S and Mavroeidi, AG and Vgena, K and Tzortzaki, E and Kalloniatis, C}, title = {Specification of Self-Adaptive Privacy-Related Requirements within Cloud Computing Environments (CCE).}, journal = {Sensors (Basel, Switzerland)}, volume = {24}, number = {10}, pages = {}, pmid = {38794080}, issn = {1424-8220}, support = {2550//Hellenic Foundation for Research and Innovation/ ; }, abstract = {This paper presents a novel approach to address the challenges of self-adaptive privacy in cloud computing environments (CCE). Under the Cloud-InSPiRe project, the aim is to provide an interdisciplinary framework and a beta-version tool for self-adaptive privacy design, effectively focusing on the integration of technical measures with social needs. To address that, a pilot taxonomy that aligns technical, infrastructural, and social requirements is proposed after two supplementary surveys that have been conducted, focusing on users' privacy needs and developers' perspectives on self-adaptive privacy.
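The curve-fitting route described in the DOxy abstract above can be sketched with SciPy; the saturating model form and the sample readings are illustrative assumptions, not the published conversion formula.

```python
# Sketch: fit a conversion curve from pulse-oximeter readings to reference
# DO-meter values (mg/L) so the formula can run offline on a sensing unit.
# Model form and data points are invented for illustration.
import numpy as np
from scipy.optimize import curve_fit

def conversion(raw, a, b, c):
    # Saturating curve: DO rises with the raw signal and levels off near a.
    return a - b * np.exp(-c * raw)

raw = np.array([0.5, 1.0, 2.0, 3.5, 5.0, 7.0])     # oximeter signal (a.u.)
do_ref = np.array([2.1, 3.4, 5.2, 6.8, 7.6, 8.1])  # DO meter (mg/L)

params, _ = curve_fit(conversion, raw, do_ref, p0=(9.0, 8.0, 0.5))
print("fitted a, b, c:", params)
print("predicted DO at raw=4.0:", conversion(4.0, *params))
```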
Through the integration of users' social identity-based practices and developers' insights, the taxonomy aims to provide clear guidance for developers, ensuring compliance with regulatory standards and fostering a user-centric approach to self-adaptive privacy design tailored to diverse user groups, ultimately enhancing satisfaction and confidence in cloud services.}, } @article {pmid38794042, year = {2024}, author = {Zimmerleiter, R and Greibl, W and Meininger, G and Duswald, K and Hannesschläger, G and Gattinger, P and Rohm, M and Fuczik, C and Holzer, R and Brandstetter, M}, title = {Sensor for Rapid In-Field Classification of Cannabis Samples Based on Near-Infrared Spectroscopy.}, journal = {Sensors (Basel, Switzerland)}, volume = {24}, number = {10}, pages = {}, pmid = {38794042}, issn = {1424-8220}, support = {FO999886335//Austrian Research Promotion Agency/ ; }, mesh = {*Cannabis/chemistry/classification ; *Spectroscopy, Near-Infrared/methods ; Discriminant Analysis ; Least-Squares Analysis ; Humans ; Dronabinol/analysis ; }, abstract = {A rugged handheld sensor for rapid in-field classification of cannabis samples based on their THC content using ultra-compact near-infrared spectrometer technology is presented. The device is designed for use by the Austrian authorities to discriminate between legal and illegal cannabis samples directly at the place of intervention. Hence, the sensor allows direct measurement through commonly encountered transparent plastic packaging made from polypropylene or polyethylene without any sample preparation. The measurement time is below 20 s. Measured spectral data are evaluated using partial least squares discriminant analysis directly on the device's hardware, eliminating the need for internet connectivity for cloud computing. The classification result is visually indicated directly on the sensor via a colored LED. Validation of the sensor is performed on an independent data set acquired by non-expert users after a short introduction. Despite the challenging setting, the achieved classification accuracy is higher than 80%. Therefore, the handheld sensor has the potential to reduce the number of unnecessarily confiscated legal cannabis samples, which would lead to significant monetary savings for the authorities.}, } @article {pmid38794035, year = {2024}, author = {Lin, J and Guan, Y}, title = {Load Prediction in Double-Channel Residual Self-Attention Temporal Convolutional Network with Weight Adaptive Updating in Cloud Computing.}, journal = {Sensors (Basel, Switzerland)}, volume = {24}, number = {10}, pages = {}, pmid = {38794035}, issn = {1424-8220}, abstract = {When resource demand increases and decreases rapidly, container clusters in the cloud environment need to respond to the number of containers in a timely manner to ensure service quality. Resource load prediction is a prominent challenge with the widespread adoption of cloud computing. A novel cloud computing load prediction method, the Double-channel residual Self-attention Temporal convolutional Network with Weight adaptive updating (DSTNW), has been proposed to make the response of the container cluster more rapid and accurate. A Double-channel Temporal Convolution Network model (DTN) has been developed to capture long-term sequence dependencies and enhance feature extraction capabilities when the model handles long load sequences. Double-channel dilated causal convolution has been adopted to replace the single-channel dilated causal convolution in the DTN.
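The dilated causal convolution at the heart of the DSTNW-style temporal network above can be sketched in PyTorch; the layer sizes are arbitrary, and this shows only the causal-padding idea, not the authors' full double-channel architecture.

```python
# Sketch of a dilated causal 1-D convolution: left-pad the sequence so each
# output step sees only past inputs, then convolve with a dilation factor.
import torch
import torch.nn as nn
import torch.nn.functional as F

class DilatedCausalConv1d(nn.Module):
    def __init__(self, channels: int, kernel_size: int = 3, dilation: int = 2):
        super().__init__()
        self.pad = (kernel_size - 1) * dilation  # left padding keeps causality
        self.conv = nn.Conv1d(channels, channels, kernel_size,
                              dilation=dilation)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # x: (batch, channels, time); pad on the left only.
        return self.conv(F.pad(x, (self.pad, 0)))

load_series = torch.randn(8, 1, 64)          # toy (batch, channel, time) load
out = DilatedCausalConv1d(channels=1)(load_series)
print(out.shape)                             # time length preserved: (8, 1, 64)
```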
A residual temporal self-attention mechanism (SM) has been proposed to improve the performance of the network and focus on features with significant contributions from the DTN. DTN and SM jointly constitute a double-channel residual self-attention temporal convolutional network (DSTN). In addition, by evaluating the accuracy aspects of single and stacked DSTNs, an adaptive weight strategy has been proposed to assign corresponding weights for the single and stacked DSTNs, respectively. The experimental results highlight that the developed method has outstanding prediction performance for cloud computing in comparison with some state-of-the-art methods. The proposed method achieved an average improvement of 24.16% and 30.48% on the Container dataset and Google dataset, respectively.}, } @article {pmid38794018, year = {2024}, author = {Xie, Y and Meng, X and Nguyen, DT and Xiang, Z and Ye, G and Hu, L}, title = {A Discussion of Building a Smart SHM Platform for Long-Span Bridge Monitoring.}, journal = {Sensors (Basel, Switzerland)}, volume = {24}, number = {10}, pages = {}, pmid = {38794018}, issn = {1424-8220}, support = {4000108996/13/NL/US//European Space Agency/ ; 4000116646/16/NL/US//European Space Agency/ ; }, abstract = {This paper explores the development of a smart Structural Health Monitoring (SHM) platform tailored for long-span bridge monitoring, using the Forth Road Bridge (FRB) as a case study. It discusses the selection of smart sensors available for real-time monitoring, the formulation of an effective data strategy encompassing the collection, processing, management, analysis, and visualization of monitoring data sets to support decision-making, and the establishment of a cost-effective and intelligent sensor network aligned with the objectives set through comprehensive communication with asset owners. Due to the high data rates and dense sensor installations, conventional processing techniques are inadequate for fulfilling monitoring functionalities and ensuring security. Cloud computing emerges as a widely adopted solution for processing and storing vast monitoring data sets. Drawing from the authors' experience in implementing long-span bridge monitoring systems in the UK and China, this paper compares the advantages and limitations of employing cloud computing for long-span bridge monitoring. Furthermore, it explores strategies for developing a robust data strategy and leveraging artificial intelligence (AI) and digital twin (DT) technologies to extract relevant information or patterns regarding asset health conditions. This information is then visualized through the interaction between physical and virtual worlds, facilitating timely and informed decision-making in managing critical road transport infrastructure.}, } @article {pmid38786560, year = {2024}, author = {Peralta, T and Menoscal, M and Bravo, G and Rosado, V and Vaca, V and Capa, D and Mulas, M and Jordá-Bordehore, L}, title = {Rock Slope Stability Analysis Using Terrestrial Photogrammetry and Virtual Reality on Ignimbritic Deposits.}, journal = {Journal of imaging}, volume = {10}, number = {5}, pages = {}, pmid = {38786560}, issn = {2313-433X}, abstract = {Puerto de Cajas serves as a vital high-altitude passage in Ecuador, connecting the coastal region to the city of Cuenca. The stability of this rocky massif is carefully managed through the assessment of blocks and discontinuities, ensuring safe travel.
This study presents a novel approach, employing rapid and cost-effective methods to evaluate an unexplored area within the protected expanse of Cajas. Using terrestrial photogrammetry and strategically positioned geomechanical stations along the slopes, we digitized the slopes and generated a detailed point cloud capturing elusive terrain features. Validation of the collected data was achieved by comparing directional data from CloudCompare software with manual readings taken at control points using a digital compass integrated into a phone. The analysis encompasses three slopes, employing the SMR, Q-slope, and kinematic methodologies. Results from the SMR system closely align with kinematic analysis, indicating satisfactory slope quality. Nonetheless, continued vigilance in stability control remains imperative for ensuring road safety and preserving the site's integrity. Moreover, this research lays the groundwork for the creation of a publicly accessible 3D repository, enhancing visualization capabilities through Google Virtual Reality. This initiative not only aids in replicating the findings but also facilitates access to an augmented reality environment, thereby fostering collaborative research endeavors.}, } @article {pmid38776642, year = {2024}, author = {Li, X and Zhao, P and Liang, M and Ji, X and Zhang, D and Xie, Z}, title = {Dynamics changes of coastal aquaculture ponds based on the Google Earth Engine in Jiangsu Province, China.}, journal = {Marine pollution bulletin}, volume = {203}, number = {}, pages = {116502}, doi = {10.1016/j.marpolbul.2024.116502}, pmid = {38776642}, issn = {1879-3363}, mesh = {China ; *Aquaculture ; *Ponds ; *Environmental Monitoring/methods ; Remote Sensing Technology ; }, abstract = {Monitoring the spatiotemporal variation in coastal aquaculture zones is essential to providing a scientific basis for formulating scientifically reasonable land management policies. This study uses the Google Earth Engine (GEE) remote sensing cloud platform to extract aquaculture information based on Landsat series and Sentinel-2 images for six years between 1984 and 2021 (1984, 1990, 2000, 2010, 2016 and 2021), so as to analyze the changes in the coastal aquaculture pond area of Jiangsu Province, along with their spatiotemporal characteristics. The overall area of coastal aquaculture ponds in Jiangsu shows an increasing trend in the early period and a decreasing trend in the later period. Over the past 37 years, the area of coastal aquaculture ponds has increased by a total of 54,639.73 ha. This study can provide basic data for the sustainable development of coastal aquaculture in Jiangsu, and a reference for related studies in other regions.}, } @article {pmid38771196, year = {2024}, author = {Hulagappa Nebagiri, M and Pillappa Hnumanthappa, L}, title = {Fractional social optimization-based migration and replica management algorithm for load balancing in distributed file system for cloud computing.}, journal = {Network (Bristol, England)}, volume = {}, number = {}, pages = {1-28}, doi = {10.1080/0954898X.2024.2353665}, pmid = {38771196}, issn = {1361-6536}, abstract = {Effective management of data is a major issue in a Distributed File System (DFS), such as the cloud. This issue is handled by replicating files in an effective manner, which can minimize data access time and improve data availability.
This paper devises a Fractional Social Optimization Algorithm (FSOA) for replica management and load balancing in a cloud-based DFS. Balancing the DFS workload is the main objective. Chunks are created by partitioning each file using Deep Fuzzy Clustering (DFC), and the chunks are then assigned to virtual machines (VMs) in a round-robin manner. Load balancing is performed with the proposed FSOA, which considers objectives such as resource use, energy consumption, and migration cost. The FSOA is formulated by uniting the Social Optimization Algorithm (SOA) with Fractional Calculus (FC). Replica management in the DFS is likewise driven by the proposed FSOA under the same objectives. The FSOA achieved the smallest load of 0.299, the smallest cost of 0.395, the smallest energy consumption of 0.510, the smallest overhead of 0.358, and the smallest throughput of 0.537.}, } @article {pmid38770301, year = {2024}, author = {Qureshi, KM and Mewada, BG and Kaur, S and Khan, A and Al-Qahtani, MM and Qureshi, MRNM}, title = {Investigating industry 4.0 technologies in logistics 4.0 usage towards sustainable manufacturing supply chain.}, journal = {Heliyon}, volume = {10}, number = {10}, pages = {e30661}, pmid = {38770301}, issn = {2405-8440}, abstract = {In the era of Industry 4.0 (I4.0), automation and data analysis have undergone significant advancements, greatly impacting production management and operations management. Technologies such as the Internet of Things (IoT), robotics, cloud computing (CC), and big data have played a crucial role in shaping Logistics 4.0 (L4.0) and improving the efficiency of the manufacturing supply chain (SC), ultimately contributing to sustainability goals. The present research investigates the role of I4.0 technologies within the framework of the extended theory of planned behavior (ETPB). The research explores variables including subjective norms, attitude, and perceived behavioral control, leading to word-of-mouth and purchase intention. By modeling these variables, the study aims to understand the influence of I4.0 technologies on L4.0 to establish a sustainable manufacturing SC. A questionnaire was administered to gather input from small and medium-sized firms (SMEs) in the manufacturing industry. An empirical study, along with partial least squares structural equation modeling (SEM), was conducted to analyze the data. The findings indicate that the use of I4.0 technology in L4.0 influences subjective norms, which subsequently influence attitudes and personal behavior control. This, in turn, leads to word-of-mouth and purchase intention. The results provide valuable insights for shippers and logistics service providers, empowering them to enhance their performance and contribute to achieving sustainability objectives. Consequently, this study contributes to promoting sustainability in the manufacturing SC by stimulating the adoption of I4.0 technologies in L4.0.}, } @article {pmid38768167, year = {2024}, author = {Vo, DH and Vo, AT and Dinh, CT and Tran, NP}, title = {Corporate restructuring and firm performance in Vietnam: The moderating role of digital transformation.}, journal = {PloS one}, volume = {19}, number = {5}, pages = {e0303491}, pmid = {38768167}, issn = {1932-6203}, mesh = {Vietnam ; Humans ; *Commerce ; Information Technology ; }, abstract = {In the digital age, firms should continually innovate and adapt to remain competitive and enhance performance.
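Per the FSOA abstract above, the chunk-to-VM assignment step is a simple round-robin; a minimal sketch with invented chunk and VM names is:

```python
# Sketch: assign file chunks to virtual machines in round-robin order,
# as the FSOA abstract describes. Names and counts are illustrative.
from itertools import cycle

def round_robin_assign(chunks, vms):
    assignment = {vm: [] for vm in vms}
    for chunk, vm in zip(chunks, cycle(vms)):
        assignment[vm].append(chunk)
    return assignment

chunks = [f"chunk{i}" for i in range(7)]
vms = ["vm1", "vm2", "vm3"]
print(round_robin_assign(chunks, vms))
# {'vm1': ['chunk0', 'chunk3', 'chunk6'], 'vm2': [...], 'vm3': [...]}
```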
Innovation and adaptation require firms to take a holistic approach to their corporate structuring to ensure the efficiency and effectiveness needed to stay competitive. This study examines how corporate restructuring impacts firm performance in Vietnam. We then investigate the moderating role of digital transformation in the corporate restructuring-firm performance nexus. We use content analysis, with a focus on particular terms, including "digitalization," "big data," "cloud computing," "blockchain," and "information technology" for 11 years, from 2011 to 2021. The frequency index from these keywords is developed to proxy the digital transformation for the Vietnamese listed firms. A final sample includes 118 Vietnamese listed firms with sufficient data for the analysis using the generalized method of moments (GMM) approach. The results indicate that corporate restructuring, including financial, portfolio, and operational restructuring, has a negative effect on firm performance in Vietnam. Digital transformation also negatively affects firm performance. However, corporate restructuring implemented in conjunction with digital transformation improves the performance of Vietnamese listed firms. These findings largely remain unchanged across various robustness analyses.}, } @article {pmid38753476, year = {2024}, author = {Gupta, I and Saxena, D and Singh, AK and Lee, CN}, title = {A Multiple Controlled Toffoli Driven Adaptive Quantum Neural Network Model for Dynamic Workload Prediction in Cloud Environments.}, journal = {IEEE transactions on pattern analysis and machine intelligence}, volume = {46}, number = {12}, pages = {7574-7588}, doi = {10.1109/TPAMI.2024.3402061}, pmid = {38753476}, issn = {1939-3539}, abstract = {The key challenges in cloud computing encompass dynamic resource scaling, load balancing, and power consumption. Accurate workload prediction is identified as a crucial strategy to address these challenges. Despite numerous methods proposed to tackle this issue, existing approaches fall short of capturing the high-variance nature of volatile and dynamic cloud workloads. Consequently, this paper presents a novel Multiple Controlled Toffoli-driven Adaptive Quantum Neural Network (MCT-AQNN) model to establish an empirical solution to complex, elastic as well as challenging workload prediction problems by optimizing the exploration, adaptation, and exploitation proficiencies through quantum learning. The computational adaptability of quantum computing is ingrained with machine learning algorithms to derive more precise correlations from dynamic and complex workloads. The furnished input data point and hatched neural weights are refitted in the form of qubits while the controlling effects of Multiple Controlled Toffoli (MCT) gates are operated at the hidden and output layers of the Quantum Neural Network (QNN) for enhancing learning capabilities. Complementarily, a Uniformly Adaptive Quantum Machine Learning (UAQL) algorithm has been developed to functionally and effectively train the QNN. Extensive experiments are conducted and comparisons are performed with state-of-the-art methods using four real-world benchmark datasets.
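The content-analysis proxy in the Vo et al. study above boils down to counting digitalization-related keywords in firm disclosures; a toy sketch, with an invented report snippet, is shown below.

```python
# Sketch: build a digital-transformation frequency index by counting the
# study's five keywords in a firm's annual-report text. Text is invented.
import re

KEYWORDS = ["digitalization", "big data", "cloud computing",
            "blockchain", "information technology"]

def dt_index(report_text: str) -> int:
    text = report_text.lower()
    # Count non-overlapping occurrences of each keyword phrase.
    return sum(len(re.findall(re.escape(kw), text)) for kw in KEYWORDS)

sample_report = ("This year we invested in cloud computing and big data "
                 "analytics; our information technology roadmap also "
                 "pilots blockchain settlement.")
print(dt_index(sample_report))  # 4
```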
Experimental results show that MCT-AQNN achieves between 32% and 96% higher accuracy than existing approaches.}, } @article {pmid38749656, year = {2024}, author = {Koenig, Z and Yohannes, MT and Nkambule, LL and Zhao, X and Goodrich, JK and Kim, HA and Wilson, MW and Tiao, G and Hao, SP and Sahakian, N and Chao, KR and Walker, MA and Lyu, Y and , and Rehm, HL and Neale, BM and Talkowski, ME and Daly, MJ and Brand, H and Karczewski, KJ and Atkinson, EG and Martin, AR}, title = {A harmonized public resource of deeply sequenced diverse human genomes.}, journal = {Genome research}, volume = {34}, number = {5}, pages = {796-809}, pmid = {38749656}, issn = {1549-5469}, support = {R01 MH115957/MH/NIMH NIH HHS/United States ; R37 MH107649/MH/NIMH NIH HHS/United States ; R01 HG012869/HG/NHGRI NIH HHS/United States ; K01 MH121659/MH/NIMH NIH HHS/United States ; R01 DE031261/DE/NIDCR NIH HHS/United States ; R00 MH117229/MH/NIMH NIH HHS/United States ; }, mesh = {Humans ; *Genome, Human ; *Databases, Genetic ; Human Genome Project ; High-Throughput Nucleotide Sequencing/methods ; Genetic Variation ; Genomics/methods ; }, abstract = {Underrepresented populations are often excluded from genomic studies owing in part to a lack of resources supporting their analyses. The 1000 Genomes Project (1kGP) and Human Genome Diversity Project (HGDP), which have recently been sequenced to high coverage, are valuable genomic resources because of the global diversity they capture and their open data sharing policies. Here, we harmonized a high-quality set of 4094 whole genomes from 80 populations in the HGDP and 1kGP with data from the Genome Aggregation Database (gnomAD) and identified over 153 million high-quality SNVs, indels, and SVs. We performed a detailed ancestry analysis of this cohort, characterizing population structure and patterns of admixture across populations, analyzing site frequency spectra, and measuring variant counts at global and subcontinental levels. We also show substantial added value from this data set compared with the prior versions of the component resources, typically combined via liftOver and variant intersection; for example, we catalog millions of new genetic variants, mostly rare, compared with previous releases. In addition to unrestricted individual-level public release, we provide detailed tutorials for conducting many of the most common quality-control steps and analyses with these data in a scalable cloud-computing environment and publicly release this new phased joint callset for use as a haplotype resource in phasing and imputation pipelines. This jointly called reference panel will serve as a key resource to support research on diverse ancestry populations.}, } @article {pmid38746269, year = {2024}, author = {Thiriveedhi, VK and Krishnaswamy, D and Clunie, D and Pieper, S and Kikinis, R and Fedorov, A}, title = {Cloud-based large-scale curation of medical imaging data using AI segmentation.}, journal = {Research square}, volume = {}, number = {}, pages = {}, pmid = {38746269}, issn = {2693-5015}, support = {HHSN261201500003C/CA/NCI NIH HHS/United States ; HHSN261201500003I/CA/NCI NIH HHS/United States ; P41 EB028741/EB/NIBIB NIH HHS/United States ; U24 CA258511/CA/NCI NIH HHS/United States ; }, abstract = {Rapid advances in medical imaging Artificial Intelligence (AI) offer unprecedented opportunities for automatic analysis and extraction of data from large imaging collections.
Computational demands of such modern AI tools may be difficult to satisfy with the capabilities available on premises. Cloud computing offers the promise of economical access and extreme scalability. Few studies examine the price/performance tradeoffs of using the cloud, in particular for medical image analysis tasks. We investigate the use of cloud-provisioned compute resources for AI-based curation of the National Lung Screening Trial (NLST) Computed Tomography (CT) images available from the National Cancer Institute (NCI) Imaging Data Commons (IDC). We evaluated NCI Cancer Research Data Commons (CRDC) Cloud Resources - Terra (FireCloud) and Seven Bridges-Cancer Genomics Cloud (SB-CGC) platforms - to perform automatic image segmentation with TotalSegmentator and pyradiomics feature extraction for a large cohort containing >126,000 CT volumes from >26,000 patients. Utilizing >21,000 Virtual Machines (VMs) over the course of the computation, we completed the analysis in under 9 hours, as compared to the estimated 522 days that would be needed on a single workstation. The total cost of utilizing the cloud for this analysis was $1,011.05. Our contributions include: 1) an evaluation of the numerous tradeoffs towards optimizing the use of cloud resources for large-scale image analysis; 2) CloudSegmentator, an open source reproducible implementation of the developed workflows, which can be reused and extended; 3) practical recommendations for utilizing the cloud for large-scale medical image computing tasks. We also share the results of the analysis: a total of 9,565,554 segmentations of the anatomic structures and the accompanying radiomics features in IDC as of release v18.}, } @article {pmid38743439, year = {2024}, author = {Philippou, J and Yáñez Feliú, G and Rudge, TJ}, title = {WebCM: A Web-Based Platform for Multiuser Individual-Based Modeling of Multicellular Microbial Populations and Communities.}, journal = {ACS synthetic biology}, volume = {13}, number = {6}, pages = {1952-1955}, pmid = {38743439}, issn = {2161-5063}, mesh = {*Software ; *Internet ; Computer Simulation ; User-Computer Interface ; Models, Biological ; }, abstract = {WebCM is a web platform that enables users to create, edit, run, and view individual-based simulations of multicellular microbial populations and communities on a remote compute server. WebCM builds upon the simulation software CellModeller in the back end and provides users with a web-browser-based modeling interface including model editing, execution, and playback. Multiple users can run and manage multiple simulations simultaneously, sharing the host hardware. Since it is based on CellModeller, it can utilize both GPU and CPU parallelization. The user interface provides real-time interactive 3D graphical representations for inspection of simulations at all time points, and the results can be downloaded for detailed offline analysis.
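The headline numbers in the IDC curation study above imply a roughly 1,400-fold speedup and a fraction of a cent per CT volume; a back-of-envelope check of the quoted figures is:

```python
# Arithmetic reproduced from the quoted figures: 522 workstation-days versus
# 9 cloud hours, and $1,011.05 for >126,000 CT volumes.
single_workstation_hours = 522 * 24
cloud_hours = 9
speedup = single_workstation_hours / cloud_hours
cost_per_volume = 1011.05 / 126_000

print(f"speedup ~ {speedup:.0f}x")                     # ~1392x
print(f"cost per CT volume ~ ${cost_per_volume:.4f}")  # ~$0.0080
```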
It can be run on cloud computing services or on a local server, allowing collaboration within and between laboratories.}, } @article {pmid38733003, year = {2024}, author = {Lin, Z and Liang, J}, title = {Edge Caching Data Distribution Strategy with Minimum Energy Consumption.}, journal = {Sensors (Basel, Switzerland)}, volume = {24}, number = {9}, pages = {}, pmid = {38733003}, issn = {1424-8220}, support = {61862003//National Natural Science Foundation of China/ ; }, abstract = {In the context of the rapid development of the Internet of Vehicles, virtual reality, automatic driving and the industrial Internet, the terminal devices in the network show explosive growth. As a result, more and more information is generated from the edge of the network, which makes the data throughput increase dramatically in the mobile communication network. As the key technology of the fifth-generation mobile communication network, mobile edge caching technology, which caches popular data on edge servers deployed at the edge of the network, avoids the data transmission delay of the backhaul link and the occurrence of network congestion. With the growing scale of the network, distributing hot data from cloud servers to edge servers will generate huge energy consumption. To realize the green and sustainable development of the communication industry and reduce the energy consumed in distributing data that needs to be cached on edge servers, we make the first attempt to propose and solve the problem of edge caching data distribution with minimum energy consumption (ECDDMEC) in this paper. First, we model and formulate the problem as a constrained optimization problem and then prove its NP-hardness. Subsequently, we design a greedy algorithm with computational complexity of O(n²) to solve the problem approximately. Experimental results show that compared with the distribution strategy of each edge server directly requesting data from the cloud server, the strategy obtained by the algorithm can significantly reduce the energy consumption of data distribution.}, } @article {pmid38732864, year = {2024}, author = {Emvoliadis, A and Vryzas, N and Stamatiadou, ME and Vrysis, L and Dimoulas, C}, title = {Multimodal Environmental Sensing Using AI & IoT Solutions: A Cognitive Sound Analysis Perspective.}, journal = {Sensors (Basel, Switzerland)}, volume = {24}, number = {9}, pages = {}, pmid = {38732864}, issn = {1424-8220}, abstract = {This study presents a novel audio compression technique, tailored for environmental monitoring within multi-modal data processing pipelines. Considering the crucial role that audio data play in environmental evaluations, particularly in contexts with extreme resource limitations, our strategy substantially decreases bit rates to facilitate efficient data transfer and storage. This is accomplished without undermining the accuracy necessary for trustworthy air pollution analysis while simultaneously minimizing processing expenses. More specifically, our approach fuses a Deep-Learning-based model, optimized for edge devices, along with a conventional coding schema for audio compression. Once transmitted to the cloud, the compressed data undergo a decoding process, leveraging vast cloud computing resources for accurate reconstruction and classification.
The experimental results indicate that our approach leads to a relatively minor decrease in accuracy, even at notably low bit rates, and demonstrates strong robustness in identifying data from labels not included in our training dataset.}, } @article {pmid38732863, year = {2024}, author = {Hanczewski, S and Stasiak, M and Weissenberg, M}, title = {An Analytical Model of IaaS Architecture for Determining Resource Utilization.}, journal = {Sensors (Basel, Switzerland)}, volume = {24}, number = {9}, pages = {}, pmid = {38732863}, issn = {1424-8220}, support = {Grant 0313/SBAD/1312//Ministry of Education and Science/ ; }, abstract = {Cloud computing has become a major component of the modern IT ecosystem. A key contributor to this has been the development of Infrastructure as a Service (IaaS) architecture, in which users' virtual machines (VMs) are run on the service provider's physical infrastructure, making it possible to become independent of the need to purchase one's own physical machines (PMs). One of the main aspects to consider when designing such systems is achieving the optimal utilization of individual resources, such as processor, RAM, disk, and available bandwidth. In response to these challenges, the authors developed an analytical model (the ARU method) to determine the average utilization levels of the aforementioned resources. The effectiveness of the proposed analytical model was evaluated by comparing the results obtained by utilizing the model with those obtained by conducting a digital simulation of the operation of a cloud system according to the IaaS paradigm. The results show the effectiveness of the model regardless of the structure of the emerging requests, the variability of the capacity of individual resources, and the number of physical machines in the system. This translates into the applicability of the model in the design process of cloud systems.}, } @article {pmid38719856, year = {2024}, author = {Kent, RM and Barbosa, WAS and Gauthier, DJ}, title = {Controlling chaos using edge computing hardware.}, journal = {Nature communications}, volume = {15}, number = {1}, pages = {3886}, pmid = {38719856}, issn = {2041-1723}, support = {FA9550-22-1-0203//United States Department of Defense | U.S. Air Force (United States Air Force)/ ; }, abstract = {Machine learning provides a data-driven approach for creating a digital twin of a system - a digital model used to predict the system behavior. Having an accurate digital twin can drive many applications, such as controlling autonomous systems. Often, the size, weight, and power consumption of the digital twin or related controller must be minimized, ideally realized on embedded computing hardware that can operate without a cloud-computing connection. Here, we show that a nonlinear controller based on next-generation reservoir computing can tackle a difficult control problem: controlling a chaotic system to an arbitrary time-dependent state. The model is accurate, yet it is small enough to be evaluated on a field-programmable gate array typically found in embedded devices. Furthermore, the model only requires 25.0 ± 7.0 nJ per evaluation, well below other algorithms, even without systematic power optimization. 
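Next-generation reservoir computing, the control technique in the Kent et al. entry above, amounts to a nonlinear vector autoregression: time-delay features plus a ridge-regression readout. The sketch below forecasts a toy chaotic series one step ahead; the data and hyperparameters are illustrative assumptions, not the paper's control task.

```python
# Sketch of next-generation reservoir computing (NGRC): a linear readout
# over time-delay features and their quadratic products, fitted by ridge
# regression. Toy logistic-map data stand in for a chaotic system.
import numpy as np

x = np.empty(600)
x[0] = 0.3
for t in range(599):
    x[t + 1] = 3.9 * x[t] * (1 - x[t])   # chaotic logistic map

k = 2  # number of delay taps

def features(series, t):
    lin = np.array([series[t], series[t - 1]])     # linear terms
    quad = np.outer(lin, lin)[np.triu_indices(k)]  # unique quadratic terms
    return np.concatenate(([1.0], lin, quad))      # bias + features

train_idx = range(k, 598)
Phi = np.array([features(x, t) for t in train_idx])
y = np.array([x[t + 1] for t in train_idx])

# Ridge readout: W = (Phi^T Phi + lam*I)^(-1) Phi^T y
lam = 1e-6
W = np.linalg.solve(Phi.T @ Phi + lam * np.eye(Phi.shape[1]), Phi.T @ y)

pred = features(x, 598) @ W                        # held-out one-step forecast
print(f"predicted x[599] = {pred:.4f}, actual = {x[599]:.4f}")
```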
Our work represents the first step in deploying efficient machine learning algorithms to the computing "edge."}, } @article {pmid38711808, year = {2024}, author = {Buchanan, BC and Tang, Y and Lopez, H and Casanova, NG and Garcia, JGN and Yoon, JY}, title = {Development of a cloud-based flow rate tool for eNAMPT biomarker detection.}, journal = {PNAS nexus}, volume = {3}, number = {5}, pages = {pgae173}, pmid = {38711808}, issn = {2752-6542}, support = {P01 HL126609/HL/NHLBI NIH HHS/United States ; R01 HL141387/HL/NHLBI NIH HHS/United States ; }, abstract = {Elevated levels of extracellular nicotinamide phosphoribosyltransferase (eNAMPT) are increasingly recognized as a highly useful biomarker of inflammatory disease and disease severity. In preclinical animal studies, a monoclonal antibody that neutralizes eNAMPT has been generated to successfully reduce the extent of inflammatory cascade activation. Thus, the rapid detection of eNAMPT concentration in plasma samples at the point of care (POC) would be of great utility in assessing the benefit of administering an anti-eNAMPT therapeutic. To determine the feasibility of this POC test, we conducted a particle immunoagglutination assay on a paper microfluidic platform and quantified its extent with a flow rate measurement in less than 1 min. A smartphone and cloud-based Google Colab were used to analyze the flow rates automatically. A horizontal flow model and an immunoagglutination binding model were evaluated to optimize the detection time, sample dilution, and particle concentration. This assay successfully detected eNAMPT in both human whole blood and plasma samples (diluted to 10 and 1%), with the limit of detection of 1-20 pg/mL (equivalent to 0.1-0.2 ng/mL in undiluted blood and plasma) and a linear range of 5-40 pg/mL. Furthermore, the smartphone POC assay distinguished clinical samples with low, mid, and high eNAMPT concentrations. Together, these results indicate this POC assay, which utilizes low-cost materials, time-effective methods, and a straightforward immunoassay (without surface immobilization), may reliably allow rapid determination of eNAMPT blood/plasma levels to aid patient stratification in clinical trials and guide ALT-100 mAb therapeutic decision-making.}, } @article {pmid38707321, year = {2024}, author = {Sankar M S, K and Gupta, S and Luthra, S and Kumar, A and Jagtap, S and Samadhiya, A}, title = {Empowering sustainable manufacturing: Unleashing digital innovation in spool fabrication industries.}, journal = {Heliyon}, volume = {10}, number = {9}, pages = {e29994}, pmid = {38707321}, issn = {2405-8440}, abstract = {In industrial landscapes, spool fabrication industries play a crucial role in the successful completion of numerous industrial projects by providing prefabricated modules. However, the implementation of digitalized sustainable practices in spool fabrication industries is progressing slowly and is still in its embryonic stage due to several challenges. To implement digitalized sustainable manufacturing (SM), digital technologies such as Internet of Things, Cloud computing, Big data analytics, Cyber-physical systems, Augmented reality, Virtual reality, and Machine learning are required in the context of sustainability.
The scope of the present study entails prioritization of the enablers that promote the implementation of digitalized sustainable practices in spool fabrication industries using the Improved Fuzzy Stepwise Weight Assessment Ratio Analysis (IMF-SWARA) method integrated with the Triangular Fuzzy Bonferroni Mean (TFBM). The enablers are identified through a systematic literature review and are validated by a team of seven experts through a questionnaire survey. The finally identified enablers are then analyzed using the integrated IMF-SWARA and TFBM approach. The results indicate that the most significant enablers are management support, leadership, and governmental policies and regulations to implement digitalized SM. The study provides a comprehensive analysis of digital SM enablers in the spool fabrication industry and offers guidelines for the transformation of conventional systems into digitalized SM practices.}, } @article {pmid38705837, year = {2024}, author = {Mishra, A and Kim, HS and Kumar, R and Srivastava, V}, title = {Advances in Vibrio-related infection management: an integrated technology approach for aquaculture and human health.}, journal = {Critical reviews in biotechnology}, volume = {44}, number = {8}, pages = {1610-1637}, doi = {10.1080/07388551.2024.2336526}, pmid = {38705837}, issn = {1549-7801}, mesh = {Animals ; Humans ; *Aquaculture/methods ; *Vibrio ; *Vibrio Infections/therapy/veterinary ; }, abstract = {Vibrio species pose significant threats worldwide, causing mortalities in aquaculture and infections in humans. Global warming and the worldwide emergence of Vibrio disease strains are increasing day by day. Control of Vibrio species requires effective monitoring, diagnosis, and treatment strategies at the global scale. Despite current efforts based on chemical, biological, and mechanical means, Vibrio control management faces limitations due to complicated implementation processes. This review explores the intricacies and challenges of Vibrio-related diseases, including accurate and cost-effective diagnosis and effective control. The global burden due to emerging Vibrio species further complicates management strategies. We propose an innovative integrated technology model that harnesses cutting-edge technologies to address these obstacles. The proposed model incorporates advanced tools, such as biosensing technologies, the Internet of Things (IoT), remote sensing devices, cloud computing, and machine learning. This model offers invaluable insights and supports better decision-making by integrating real-time ecological data and biological phenotype signatures. A major advantage of our approach lies in leveraging cloud-based analytics programs, efficiently extracting meaningful information from vast and complex datasets. Collaborating with data and clinical professionals ensures logical and customized solutions tailored to each unique situation. Aquaculture biotechnology that prioritizes sustainability may have a large impact on human health and the seafood industry. Our review underscores the importance of adopting this model, revolutionizing the prognosis and management of Vibrio-related infections, even under complex circumstances.
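The SWARA core of the IMF-SWARA method in the spool-fabrication study above is a short cascade of expert ratios; the sketch below implements classical (crisp) SWARA with invented enabler names and comparative-importance values, not the paper's fuzzy IMF variant.

```python
# Sketch of classical SWARA weighting: criteria are listed from most to
# least important; s[j] is the expert's comparative importance of criterion
# j relative to criterion j-1. Names and s-values are invented.
def swara_weights(criteria, s):
    k, q = [1.0], [1.0]                  # k_1 = 1, q_1 = 1
    for s_j in s:                        # for j = 2..n
        k.append(s_j + 1.0)              # k_j = s_j + 1
        q.append(q[-1] / k[-1])          # q_j = q_{j-1} / k_j
    total = sum(q)
    return {c: qj / total for c, qj in zip(criteria, q)}

enablers = ["management support", "leadership",
            "government policies", "training"]
s_values = [0.20, 0.15, 0.30]            # one fewer than the criteria
for name, w in swara_weights(enablers, s_values).items():
    print(f"{name}: {w:.3f}")
```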
Furthermore, this model has promising implications for aquaculture and public health, addressing the United Nations Sustainable Development Goals and their development agenda.}, } @article {pmid38698084, year = {2024}, author = {Han, Y and Wei, Z and Huang, G}, title = {An imbalance data quality monitoring based on SMOTE-XGBOOST supported by edge computing.}, journal = {Scientific reports}, volume = {14}, number = {1}, pages = {10151}, pmid = {38698084}, issn = {2045-2322}, support = {51975386//the National Natural Science Foundation of China/ ; N2022J014//Science and Technology Research and Development Program of China State Railway Group Co.,Ltd./ ; 2022020630-JH1/108//Science and Technology Program of Liaoning Province "Unveiling and Commanding"/ ; }, abstract = {Product assembly involves extensive production data that is characterized by high dimensionality, multiple samples, and data imbalance. The article proposes an edge computing-based framework for monitoring product assembly quality in the industrial Internet of Things. Edge computing technology relieves the pressure of aggregating enormous amounts of data to the cloud center for processing. To address the problem of data imbalance, we compared five sampling methods: Borderline SMOTE, Random Downsampling, Random Upsampling, SMOTE, and ADASYN. Finally, the quality monitoring model SMOTE-XGBoost is proposed, and the hyperparameters of the model are optimized by using the Grid Search method. The proposed framework and quality control methodology were applied to an assembly line of IGBT modules for the traction system, and the validity of the model was experimentally verified.}, } @article {pmid38696761, year = {2024}, author = {Peccoud, S and Berezin, CT and Hernandez, SI and Peccoud, J}, title = {PlasCAT: Plasmid Cloud Assembly Tool.}, journal = {Bioinformatics (Oxford, England)}, volume = {40}, number = {5}, pages = {}, pmid = {38696761}, issn = {1367-4811}, support = {#MCB-2123367//National Science Foundation/ ; T32 GM132057/GM/NIGMS NIH HHS/United States ; R01 GM147816/GM/NIGMS NIH HHS/United States ; R21 AI168482/AI/NIAID NIH HHS/United States ; //Suzanne and Walter Scott Foundation/ ; }, mesh = {*Software ; *Plasmids/genetics ; Cloud Computing ; Computational Biology/methods ; Sequence Analysis, DNA/methods ; Internet ; }, abstract = {SUMMARY: PlasCAT (Plasmid Cloud Assembly Tool) is an easy-to-use cloud-based bioinformatics tool that enables de novo plasmid sequence assembly from raw sequencing data. Nontechnical users can now assemble sequences from long reads and short reads without ever touching a line of code. PlasCAT uses high-performance computing servers to reduce run times on assemblies and deliver results faster.
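Stepping back to the Han et al. entry above: a minimal sketch of that class of imbalance-aware pipeline, assuming the scikit-learn, imbalanced-learn, and xgboost APIs. The synthetic dataset and the parameter grid are illustrative, not the paper's configuration.

```python
# SMOTE + XGBoost quality-monitoring sketch with grid search (cf. Han et al.).
from imblearn.over_sampling import SMOTE
from imblearn.pipeline import Pipeline
from sklearn.datasets import make_classification
from sklearn.model_selection import GridSearchCV, train_test_split
from xgboost import XGBClassifier

# Synthetic imbalanced "assembly quality" data: ~5% defective samples
X, y = make_classification(n_samples=5000, n_features=30, weights=[0.95],
                           random_state=0)
X_tr, X_te, y_tr, y_te = train_test_split(X, y, stratify=y, random_state=0)

# SMOTE inside the pipeline so oversampling touches only the training folds
pipe = Pipeline([
    ("smote", SMOTE(random_state=0)),
    ("xgb", XGBClassifier(eval_metric="logloss")),
])
grid = GridSearchCV(pipe,
                    {"xgb__max_depth": [3, 6], "xgb__n_estimators": [100, 300]},
                    scoring="f1", cv=3)
grid.fit(X_tr, y_tr)
print(grid.best_params_, grid.score(X_te, y_te))
```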

PlasCAT is freely available on the web at https://sequencing.genofab.com. The assembly pipeline source code and server code are available for download at https://bitbucket.org/genofabinc/workspace/projects/PLASCAT. Click the Cancel button to access the source code without authenticating. The web servers are implemented in React.js and Python, with all major browsers supported.}, } @article {pmid38695012, year = {2024}, author = {Blindenbach, J and Kang, J and Hong, S and Karam, C and Lehner, T and Gürsoy, G}, title = {Ultra-secure storage and analysis of genetic data for the advancement of precision medicine.}, journal = {bioRxiv : the preprint server for biology}, volume = {}, number = {}, pages = {}, pmid = {38695012}, issn = {2692-8205}, support = {R00 HG010909/HG/NHGRI NIH HHS/United States ; R35 GM147004/GM/NIGMS NIH HHS/United States ; }, abstract = {Cloud computing provides the opportunity to store the ever-growing genotype-phenotype data sets needed to achieve the full potential of precision medicine. However, due to the sensitive nature of this data and the patchwork of data privacy laws across states and countries, additional security protections are proving necessary to ensure data privacy and security. Here we present SQUiD, a secure queryable database for storing and analyzing genotype-phenotype data. With SQUiD, genotype-phenotype data can be stored in a low-security, low-cost public cloud in encrypted form, which researchers can securely query without the public cloud ever being able to decrypt the data. We demonstrate the usability of SQUiD by replicating various commonly used calculations such as polygenic risk scores, cohort creation for GWAS, MAF filtering, and patient similarity analysis both on synthetic and UK Biobank data. Our work represents a new and scalable platform enabling the realization of precision medicine without security and privacy concerns.}, } @article {pmid38682960, year = {2024}, author = {Drmota, P and Nadlinger, DP and Main, D and Nichol, BC and Ainley, EM and Leichtle, D and Mantri, A and Kashefi, E and Srinivas, R and Araneda, G and Ballance, CJ and Lucas, DM}, title = {Verifiable Blind Quantum Computing with Trapped Ions and Single Photons.}, journal = {Physical review letters}, volume = {132}, number = {15}, pages = {150604}, doi = {10.1103/PhysRevLett.132.150604}, pmid = {38682960}, issn = {1079-7114}, abstract = {We report the first hybrid matter-photon implementation of verifiable blind quantum computing. We use a trapped-ion quantum server and a client-side photonic detection system networked via a fiber-optic quantum link. The availability of memory qubits and deterministic entangling gates enables interactive protocols without postselection, key requirements for any scalable blind server that previous realizations could not provide. We quantify the privacy at ≲0.03 leaked classical bits per qubit.
This experiment demonstrates a path to fully verified quantum computing in the cloud.}, } @article {pmid38682533, year = {2024}, author = {Schweitzer, M and Ostheimer, P and Lins, A and Romano, V and Steger, B and Baumgarten, D and Augustin, M}, title = {Transforming Tele-Ophthalmology: Utilizing Cloud Computing for Remote Eye Care.}, journal = {Studies in health technology and informatics}, volume = {313}, number = {}, pages = {215-220}, doi = {10.3233/SHTI240040}, pmid = {38682533}, issn = {1879-8365}, mesh = {*Cloud Computing ; *Ophthalmology ; *Telemedicine ; Humans ; Radiology Information Systems ; Information Storage and Retrieval/methods ; }, abstract = {BACKGROUND: Tele-ophthalmology is gaining recognition for its role in improving eye care accessibility via cloud-based solutions. The Google Cloud Platform (GCP) Healthcare API enables secure and efficient management of medical image data such as high-resolution ophthalmic images.

OBJECTIVES: This study investigates cloud-based solutions' effectiveness in tele-ophthalmology, with a focus on GCP's role in data management, annotation, and integration for a novel imaging device.

METHODS: Leveraging the Integrating the Healthcare Enterprise (IHE) Eye Care profile, the cloud platform was utilized as a PACS and integrated with the Open Health Imaging Foundation (OHIF) Viewer for image display and annotation capabilities for ophthalmic images.

RESULTS: The setup of a GCP DICOM storage and the OHIF Viewer facilitated remote image data analytics. Prolonged loading times and relatively large individual image file sizes indicated system challenges.
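As a rough illustration of the kind of DICOMweb access this setup describes, a QIDO-RS study search against a Google Cloud Healthcare API DICOM store might look as follows. The project, location, dataset, and store identifiers are placeholders, and application-default credentials are assumed to be configured.

```python
# QIDO-RS study search against a Google Cloud Healthcare API DICOM store
# (cf. the tele-ophthalmology setup above). Identifiers are placeholders.
import google.auth
from google.auth.transport.requests import AuthorizedSession

credentials, _ = google.auth.default(
    scopes=["https://www.googleapis.com/auth/cloud-platform"])
session = AuthorizedSession(credentials)

base = ("https://healthcare.googleapis.com/v1/projects/my-project"
        "/locations/europe-west3/datasets/eye-care/dicomStores/oct-images")

# List studies for one patient, requesting DICOM+JSON
resp = session.get(f"{base}/dicomWeb/studies",
                   params={"PatientID": "ANON-0001"},
                   headers={"Accept": "application/dicom+json"})
resp.raise_for_status()
for study in resp.json():
    # 0020000D is StudyInstanceUID in DICOM JSON tag notation
    print(study["0020000D"]["Value"][0])
```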

CONCLUSION: Cloud platforms have the potential to ease distributed data analytics, as needed for efficient tele-ophthalmology scenarios in research and clinical practice, by providing scalable and secure image management solutions.}, } @article {pmid38677415, year = {2024}, author = {van der Laan, E and Hazenberg, P and Weerts, AH}, title = {Simulation of long-term storage dynamics of headwater reservoirs across the globe using public cloud computing infrastructure.}, journal = {The Science of the total environment}, volume = {931}, number = {}, pages = {172678}, doi = {10.1016/j.scitotenv.2024.172678}, pmid = {38677415}, issn = {1879-1026}, abstract = {Reservoirs play an important role in relation to water security, flood risk, hydropower and natural flow regime. This study derives a novel dataset with a long-term daily water-balance (reservoir volume, inflow, outflow, evaporation and precipitation) of headwater reservoirs and storage dynamics across the globe. The data is generated using cloud computing infrastructure and a high resolution distributed hydrological model wflow_sbm. Model results are validated against earth-observed surface water area and in-situ measured reservoir volume, and show overall good model performance. Simulated headwater reservoir storage indicates that 19.4-24.4% of the reservoirs had a significant decrease in storage. This change is mainly driven by a decrease in reservoir inflow and increase in evaporation. Deployment on a Kubernetes cloud environment with reproducible workflows shows that these kinds of simulations and analyses can be conducted in less than a day.}, } @article {pmid38676279, year = {2024}, author = {Abdullahi, I and Longo, S and Samie, M}, title = {Towards a Distributed Digital Twin Framework for Predictive Maintenance in Industrial Internet of Things (IIoT).}, journal = {Sensors (Basel, Switzerland)}, volume = {24}, number = {8}, pages = {}, pmid = {38676279}, issn = {1424-8220}, abstract = {This study uses a wind turbine case study as a subdomain of the Industrial Internet of Things (IIoT) to showcase an architecture for implementing a distributed digital twin in which all important aspects of a predictive maintenance solution in a DT use a fog computing paradigm, and the typical predictive maintenance DT is improved to offer better asset utilization and management through real-time condition monitoring, predictive analytics, and health management of selected components of wind turbines in a wind farm. Digital twin (DT) is a technology that sits at the intersection of Internet of Things, Cloud Computing, and Software Engineering to provide a suitable tool for replicating physical objects in the digital space. This can facilitate the implementation of asset management in manufacturing systems through predictive maintenance solutions leveraged by machine learning (ML). With DTs, a solution architecture can easily use data and software to implement asset management solutions such as condition monitoring and predictive maintenance using acquired sensor data from physical objects and computing capabilities in the digital space. While DT offers a good solution, it is an emerging technology that could be improved with better standards, architectural framework, and implementation methodologies. Researchers in both academia and industry have showcased DT implementations with different levels of success.
However, DTs remain limited in standards and architectures that offer efficient predictive maintenance solutions with real-time sensor data and intelligent DT capabilities. An appropriate feedback mechanism is also needed to improve asset management operations.}, } @article {pmid38668979, year = {2024}, author = {Hsiao, J and Deng, LC and Moroz, LL and Chalasani, SH and Edsinger, E}, title = {Ocean to Tree: Leveraging Single-Molecule RNA-Seq to Repair Genome Gene Models and Improve Phylogenomic Analysis of Gene and Species Evolution.}, journal = {Methods in molecular biology (Clifton, N.J.)}, volume = {2757}, number = {}, pages = {461-490}, pmid = {38668979}, issn = {1940-6029}, support = {R01 NS114491/NS/NINDS NIH HHS/United States ; R21 MH119646/MH/NIMH NIH HHS/United States ; }, mesh = {Animals ; *Phylogeny ; *Evolution, Molecular ; *RNA-Seq/methods ; *Ctenophora/genetics/classification ; Genome/genetics ; Computational Biology/methods ; Software ; Genomics/methods ; Models, Genetic ; }, abstract = {Understanding gene evolution across genomes and organisms, including ctenophores, can provide unexpected biological insights. It enables powerful integrative approaches that leverage sequence diversity to advance biomedicine. Sequencing and bioinformatic tools can be inexpensive and user-friendly, but numerous options and coding can intimidate new users. Distinct challenges exist in working with data from diverse species but may go unrecognized by researchers accustomed to gold-standard genomes. Here, we provide a high-level workflow and detailed pipeline to enable animal collection, single-molecule sequencing, and phylogenomic analysis of gene and species evolution. As a demonstration, we focus on (1) PacBio RNA-seq of the genome-sequenced ctenophore Mnemiopsis leidyi, (2) diversity and evolution of the mechanosensitive ion channel Piezo in genetic models and basal-branching animals, and (3) associated challenges and solutions to working with diverse species and genomes, including gene model updating and repair using single-molecule RNA-seq. We provide a Python Jupyter Notebook version of our pipeline (GitHub Repository: Ctenophore-Ocean-To-Tree-2023 https://github.com/000generic/Ctenophore-Ocean-To-Tree-2023) that can be run for free in the Google Colab cloud to replicate our findings or modified for specific or greater use. Our protocol enables users to design new sequencing projects in ctenophores, marine invertebrates, or other novel organisms. It provides a simple, comprehensive platform that can ease new user entry into running their evolutionary sequence analyses.}, } @article {pmid38665579, year = {2024}, author = {El Jaouhari, A and Arif, J and Samadhiya, A and Naz, F and Kumar, A}, title = {Exploring the application of ICTs in decarbonizing the agriculture supply chain: A literature review and research agenda.}, journal = {Heliyon}, volume = {10}, number = {8}, pages = {e29564}, pmid = {38665579}, issn = {2405-8440}, abstract = {The contemporary agricultural supply chain necessitates the integration of information and communication technologies to effectively mitigate the multifaceted challenges posed by climate change and rising global demand for food products. Furthermore, recent developments in information and communication technologies, such as blockchain, big data analytics, the internet of things, artificial intelligence, cloud computing, etc., have made this transformation possible. 
Each of these technologies plays a particular role in enabling the agriculture supply chain ecosystem to be intelligent enough to handle the challenges of today's world. Thus, this paper reviews the crucial information and communication technologies-enabled agriculture supply chains to understand their potential uses and contemporary developments. The review is supported by 57 research papers from the Scopus database. The applications of the reviewed technologies in the agriculture supply chain are analyzed across five research areas: food safety and traceability, security and information system management, food waste, supervision and tracking, agricultural businesses and decision-making, and other applications not explicitly related to the agriculture supply chain. The study also emphasizes how information and communication technologies can help agriculture supply chains and promote agriculture supply chain decarbonization. An information and communication technologies application framework for a decarbonized agriculture supply chain is suggested based on the research's findings. The framework identifies the contribution of information and communication technologies to decision-making in agriculture supply chains. The review also offers guidelines to academics, policymakers, and practitioners on managing agriculture supply chains successfully for enhanced agricultural productivity and decarbonization.}, } @article {pmid38660213, year = {2024}, author = {Ullah, R and Yahya, M and Mostarda, L and Alshammari, A and Alutaibi, AI and Sarwar, N and Ullah, F and Ullah, S}, title = {Intelligent decision making for energy efficient fog nodes selection and smart switching in the IOT: a machine learning approach.}, journal = {PeerJ. Computer science}, volume = {10}, number = {}, pages = {e1833}, pmid = {38660213}, issn = {2376-5992}, abstract = {With the emergence of Internet of Things (IoT) technology, a huge amount of data is generated, which is costly to transfer to the cloud data centers in terms of security, bandwidth, and latency. Fog computing is an efficient paradigm for locally processing and manipulating IoT-generated data. It is difficult to configure the fog nodes to provide all of the services required by the end devices because of their static configuration and poor processing and storage capacities. To enhance fog nodes' capabilities, it is essential to reconfigure them to accommodate a broader range and variety of hosted services. In this study, we focus on the placement of fog services and their dynamic reconfiguration in response to end-device requests. Due to its growing successes and popularity in the IoT era, the Decision Tree (DT) machine learning model is implemented to predict the occurrence of requests and events in advance. The DT model enables the fog nodes to predict requests for a specific service in advance and reconfigure the fog node accordingly. The performance of the proposed model is evaluated in terms of high throughput, minimized energy consumption, and dynamic fog node smart switching. The simulation results demonstrate a notable increase in the fog node hit ratios, scaling up to 99% for the majority of services concurrently with a substantial reduction in miss ratios.
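A minimal sketch of the request-prediction idea in the Ullah et al. entry above, assuming scikit-learn's DecisionTreeClassifier; the feature columns and toy labels are invented for illustration, not taken from the paper.

```python
# Predict whether a service will be requested in the next time slot so a fog
# node can be reconfigured in advance (cf. Ullah et al. above).
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier

rng = np.random.default_rng(0)
# Hypothetical columns: hour of day, requests in last slot, device count, service id
X = np.column_stack([rng.integers(0, 24, 10_000),
                     rng.poisson(3.0, 10_000),
                     rng.integers(1, 50, 10_000),
                     rng.integers(0, 8, 10_000)])
# Toy ground truth: busy hours with recent requests tend to repeat
y = ((X[:, 0] % 12 > 6) & (X[:, 1] > 2)).astype(int)

X_tr, X_te, y_tr, y_te = train_test_split(X, y, random_state=0)
clf = DecisionTreeClassifier(max_depth=6).fit(X_tr, y_tr)
print(f"hit-rate proxy (accuracy): {clf.score(X_te, y_te):.3f}")

# A fog controller could pre-deploy the service when a request is predicted
if clf.predict([[18, 5, 20, 3]])[0]:
    print("reconfigure node: deploy service 3 before the requests arrive")
```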
Furthermore, the energy consumption is greatly reduced by over 50% as compared to a static node.}, } @article {pmid38660188, year = {2024}, author = {Cambronero, ME and Martínez, MA and Llana, L and Rodríguez, RJ and Russo, A}, title = {Towards a GDPR-compliant cloud architecture with data privacy controlled through sticky policies.}, journal = {PeerJ. Computer science}, volume = {10}, number = {}, pages = {e1898}, pmid = {38660188}, issn = {2376-5992}, abstract = {Data privacy is one of the biggest challenges facing system architects at the system design stage, especially when certain laws, such as the General Data Protection Regulation (GDPR), have to be complied with by cloud environments. In this article, we want to help cloud providers comply with the GDPR by proposing a GDPR-compliant cloud architecture. To do this, we use model-driven engineering techniques to design cloud architecture and analyze cloud interactions. In particular, we develop a complete framework, called MDCT, which includes a Unified Modeling Language profile that allows us to define specific cloud scenarios and profile validation to ensure that certain required properties are met. The validation process is implemented through the Object Constraint Language (OCL) rules, which allow us to describe the constraints in these models. To comply with many GDPR articles, the proposed cloud architecture considers data privacy and data tracking, enabling safe and secure data management and tracking in the context of the cloud. For this purpose, sticky policies associated with the data are incorporated to define permission for third parties to access the data and track instances of data access. As a result, a cloud architecture designed with MDCT contains a set of OCL rules to validate it as a GDPR-compliant cloud architecture. Our tool models key GDPR points such as user consent/withdrawal, the purpose of access, and data transparency and auditing, and considers data privacy and data tracking with the help of sticky policies.}, } @article {pmid38660156, year = {2024}, author = {Hassan, SR and Rehman, AU and Alsharabi, N and Arain, S and Quddus, A and Hamam, H}, title = {Design of load-aware resource allocation for heterogeneous fog computing systems.}, journal = {PeerJ. Computer science}, volume = {10}, number = {}, pages = {e1986}, pmid = {38660156}, issn = {2376-5992}, abstract = {The execution of delay-aware applications can be effectively handled by various computing paradigms, including fog computing, edge computing, and cloudlets. Cloud computing offers services in a centralized way through a cloud server. On the contrary, the fog computing paradigm offers services in a dispersed manner, providing services and computational facilities near the end devices. Due to the distributed provision of resources by the fog paradigm, this architecture is suitable for large-scale implementation of applications. Furthermore, fog computing offers a reduction in delay and network load as compared to cloud architecture. Resource distribution and load balancing are always important tasks in deploying efficient systems. In this research, we have proposed a heuristic-based approach that achieves a reduction in network consumption and delays by efficiently utilizing fog resources according to the load generated by the clusters of edge nodes. The proposed algorithm considers the magnitude of data produced at the edge clusters while allocating the fog resources.
The results of the evaluations performed on different scales confirm the efficacy of the proposed approach in achieving optimal performance.}, } @article {pmid38658838, year = {2024}, author = {Wei, W and Xia, X and Li, T and Chen, Q and Feng, X}, title = {Shaoxia: a web-based interactive analysis platform for single cell RNA sequencing data.}, journal = {BMC genomics}, volume = {25}, number = {1}, pages = {402}, pmid = {38658838}, issn = {1471-2164}, support = {82170971//the National Natural Science Foundations of China/ ; YJ201987//Fundamental Research Funds for the Central Universities/ ; 2021ZYD0090//Sichuan Science and Technology Program/ ; QDJF2019-3//Scientific Research Foundation, West China Hospital of Stomatology Sichuan University/ ; CIFMS 2019-I2M-5-004//CAMS Innovation Fund for Medical Sciences/ ; }, mesh = {*Single-Cell Analysis/methods ; *Software ; *Sequence Analysis, RNA/methods ; Internet ; Humans ; Computational Biology/methods ; RNA-Seq/methods ; User-Computer Interface ; }, abstract = {BACKGROUND: In recent years, single-cell RNA sequencing (scRNA-seq) has become increasingly accessible to researchers in many fields. However, interpreting its data demands proficiency in multiple programming languages and bioinformatic skills, which limits researchers without such expertise from exploring the information in scRNA-seq data. Therefore, there is a tremendous need to develop easy-to-use software covering all aspects of scRNA-seq data analysis.

RESULTS: We proposed a clear analysis framework for scRNA-seq data, which emphasized the fundamental and crucial roles of cell identity annotation, abstracting the analysis process into three stages: upstream analysis, cell annotation and downstream analysis. The framework can equip researchers with a comprehensive understanding of the analysis procedure and facilitate effective data interpretation. Leveraging the developed framework, we engineered Shaoxia, an analysis platform designed to democratize scRNA-seq analysis by accelerating processing through high-performance computing capabilities and offering a user-friendly interface accessible even to wet-lab researchers without programming expertise.
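The three-stage framework just described (upstream analysis, cell annotation, downstream analysis) can be sketched with the Scanpy API on a public dataset. Using Scanpy here is an assumption for illustration; it is not a claim about Shaoxia's internals, which sit behind a web interface.

```python
# Three-stage scRNA-seq workflow sketch (cf. the Shaoxia framework above).
import scanpy as sc

adata = sc.datasets.pbmc3k()                       # small public 10x dataset

# Stage 1: upstream analysis (QC, normalization, embedding)
sc.pp.filter_cells(adata, min_genes=200)
sc.pp.filter_genes(adata, min_cells=3)
sc.pp.normalize_total(adata, target_sum=1e4)
sc.pp.log1p(adata)
sc.pp.highly_variable_genes(adata, n_top_genes=2000, subset=True)
sc.pp.pca(adata)
sc.pp.neighbors(adata)

# Stage 2: cell annotation (clustering, then marker-based labeling)
sc.tl.leiden(adata, key_added="cluster")           # requires leidenalg
sc.tl.rank_genes_groups(adata, groupby="cluster")  # marker genes per cluster

# Stage 3: downstream analysis (e.g., visualization of annotated cells)
sc.tl.umap(adata)
sc.pl.umap(adata, color="cluster", save="_clusters.png")
```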

CONCLUSION: Shaoxia stands as a powerful and user-friendly open-source software for automated scRNA-seq analysis, offering comprehensive functionality for streamlined functional genomics studies. Shaoxia is freely accessible at http://www.shaoxia.cloud , and its source code is publicly available at https://github.com/WiedenWei/shaoxia .}, } @article {pmid38638340, year = {2024}, author = {Abbas, Q and Alyas, T and Alghamdi, T and Alkhodre, AB and Albouq, S and Niazi, M and Tabassum, N}, title = {Redefining governance: a critical analysis of sustainability transformation in e-governance.}, journal = {Frontiers in big data}, volume = {7}, number = {}, pages = {1349116}, pmid = {38638340}, issn = {2624-909X}, abstract = {With the rapid growth of information and communication technologies, governments worldwide are embracing digital transformation to enhance service delivery and governance practices. In the rapidly evolving landscape of information technology (IT), secure data management stands as a cornerstone for organizations aiming to safeguard sensitive information. Robust data modeling techniques are pivotal in structuring and organizing data, ensuring its integrity, and facilitating efficient retrieval and analysis. As the world increasingly emphasizes sustainability, integrating eco-friendly practices into data management processes becomes imperative. This study focuses on the specific context of Pakistan and investigates the potential of cloud computing in advancing e-governance capabilities. Cloud computing offers scalability, cost efficiency, and enhanced data security, making it an ideal technology for digital transformation. Through an extensive literature review, analysis of case studies, and interviews with stakeholders, this research explores the current state of e-governance in Pakistan, identifies the challenges faced, and proposes a framework for leveraging cloud computing to overcome these challenges. The findings reveal that cloud computing can significantly enhance the accessibility, scalability, and cost-effectiveness of e-governance services, thereby improving citizen engagement and satisfaction. This study provides valuable insights for policymakers, government agencies, and researchers interested in the digital transformation of e-governance in Pakistan and offers a roadmap for leveraging cloud computing technologies in similar contexts. The findings contribute to the growing body of knowledge on e-governance and cloud computing, supporting the advancement of digital governance practices globally. This research identifies monitoring parameters necessary to establish a sustainable e-governance system incorporating big data and cloud computing. The proposed framework, Monitoring and Assessment System using Cloud (MASC), is validated through secondary data analysis and successfully fulfills the research objectives. 
By leveraging big data and cloud computing, governments can revolutionize their digital governance practices, driving transformative changes and enhancing efficiency and effectiveness in public administration.}, } @article {pmid38633810, year = {2024}, author = {Du, X and Novoa-Laurentiev, J and Plasaek, JM and Chuang, YW and Wang, L and Marshall, G and Mueller, SK and Chang, F and Datta, S and Paek, H and Lin, B and Wei, Q and Wang, X and Wang, J and Ding, H and Manion, FJ and Du, J and Bates, DW and Zhou, L}, title = {Enhancing Early Detection of Cognitive Decline in the Elderly: A Comparative Study Utilizing Large Language Models in Clinical Notes.}, journal = {medRxiv : the preprint server for health sciences}, volume = {}, number = {}, pages = {}, pmid = {38633810}, support = {R44 AG081006/AG/NIA NIH HHS/United States ; }, abstract = {BACKGROUND: Large language models (LLMs) have shown promising performance in various healthcare domains, but their effectiveness in identifying specific clinical conditions in real medical records is less explored. This study evaluates LLMs for detecting signs of cognitive decline in real electronic health record (EHR) clinical notes, comparing their error profiles with traditional models. The insights gained will inform strategies for performance enhancement.

METHODS: This study, conducted at Mass General Brigham in Boston, MA, analyzed clinical notes from the four years prior to a 2019 diagnosis of mild cognitive impairment in patients aged 50 and older. We used a randomly selected, annotated sample of 4,949 note sections, filtered with keywords related to cognitive functions, for model development. For testing, a randomly selected, annotated sample of 1,996 note sections without keyword filtering was utilized. We developed prompts for two LLMs, Llama 2 and GPT-4, on HIPAA-compliant cloud-computing platforms using multiple approaches (e.g., both hard and soft prompting and error analysis-based instructions) to select the optimal LLM-based method. Baseline models included a hierarchical attention-based neural network and XGBoost. Subsequently, we constructed an ensemble of the three models using a majority vote approach.
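A minimal sketch of the majority-vote step described above: each of the three models emits a 0/1 label per note section, and the ensemble keeps whichever label at least two models agree on. The prediction arrays are stand-ins for the study's model outputs.

```python
# Majority vote over three binary classifiers (cf. the METHODS above).
import numpy as np

llm_pred  = np.array([1, 0, 1, 1, 0, 1])   # e.g., GPT-4 via prompting
hann_pred = np.array([1, 0, 0, 1, 0, 1])   # hierarchical attention network
xgb_pred  = np.array([0, 0, 1, 1, 1, 1])   # XGBoost baseline

votes = llm_pred + hann_pred + xgb_pred
ensemble = (votes >= 2).astype(int)         # majority of the three models
print(ensemble)                             # -> [1 0 1 1 0 1]
```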

RESULTS: GPT-4 demonstrated superior accuracy and efficiency compared to Llama 2, but did not outperform traditional models. The ensemble model outperformed the individual models, achieving a precision of 90.3%, a recall of 94.2%, and an F1-score of 92.2%. Notably, the ensemble model showed a significant improvement in precision, increasing from a range of 70%-79% to above 90%, compared to the best-performing single model. Error analysis revealed that 63 samples were incorrectly predicted by at least one model; however, only 2 cases (3.2%) were mutual errors across all models, indicating diverse error profiles among them.

CONCLUSIONS: LLMs and traditional machine learning models trained using local EHR data exhibited diverse error profiles. The ensemble of these models was found to be complementary, enhancing diagnostic performance. Future research should investigate integrating LLMs with smaller, localized models and incorporating medical data and domain knowledge to enhance performance on specific tasks.}, } @article {pmid38628614, year = {2024}, author = {Wang, TH and Kao, CC and Chang, TH}, title = {Ensemble Machine Learning for Predicting 90-Day Outcomes and Analyzing Risk Factors in Acute Kidney Injury Requiring Dialysis.}, journal = {Journal of multidisciplinary healthcare}, volume = {17}, number = {}, pages = {1589-1602}, pmid = {38628614}, issn = {1178-2390}, abstract = {PURPOSE: Our objectives were to (1) employ ensemble machine learning algorithms utilizing real-world clinical data to predict 90-day prognosis, including dialysis dependence and mortality, following the first hospitalized dialysis and (2) identify the significant factors associated with overall outcomes.

PATIENTS AND METHODS: We identified hospitalized patients with Acute kidney injury requiring dialysis (AKI-D) from a dataset of the Taipei Medical University Clinical Research Database (TMUCRD) from January 2008 to December 2020. The extracted data comprise demographics, comorbidities, medications, and laboratory parameters. Ensemble machine learning models were developed utilizing real-world clinical data through the Google Cloud Platform.

RESULTS: The study analyzed 1080 patients in the dialysis-dependent module, out of which 616 received regular dialysis after 90 days. Our ensemble model, consisting of 25 feedforward neural network models, demonstrated the best performance with an AUROC of 0.846. We identified the baseline creatinine value, assessed at least 90 days before the initial dialysis, as the most crucial factor. We selected 2358 patients, 984 of whom were deceased after 90 days, for the survival module. The ensemble model, comprising 15 feedforward neural network models and 10 gradient-boosted decision tree models, achieved superior performance with an AUROC of 0.865. The pre-dialysis creatinine value, tested within 90 days prior to the initial dialysis, was identified as the most significant factor.

CONCLUSION: Ensemble machine learning models outperform the logistic regression models reported in the existing literature in predicting outcomes of AKI-D. Our study, which includes a large sample size from three different hospitals, supports the significance of the creatinine value tested before the first hospitalized dialysis in determining overall prognosis. Healthcare providers could benefit from utilizing our validated prediction model to improve clinical decision-making and enhance patient care for the high-risk population.}, } @article {pmid38628390, year = {2024}, author = {Fujinami, H and Kuraishi, S and Teramoto, A and Shimada, S and Takahashi, S and Ando, T and Yasuda, I}, title = {Development of a novel endoscopic hemostasis-assisted navigation AI system in the standardization of post-ESD coagulation.}, journal = {Endoscopy international open}, volume = {12}, number = {4}, pages = {E520-E525}, pmid = {38628390}, issn = {2364-3722}, abstract = {Background and study aims While gastric endoscopic submucosal dissection (ESD) has become a treatment with fewer complications, delayed bleeding remains a challenge. Post-ESD coagulation (PEC) is performed to prevent delayed bleeding. Therefore, we developed an artificial intelligence (AI) system to detect vessels that require PEC in real time. Materials and methods Training data were extracted from 153 gastric ESD videos with sufficient images taken with a second-look endoscopy (SLE) and annotated as follows: (1) vessels that showed bleeding during SLE without PEC; (2) vessels that did not bleed during SLE with PEC; and (3) vessels that did not bleed even without PEC. The training model was created using Google Cloud Vertex AI, and a program was created to display the vessels requiring PEC in real time using a bounding box. The AI was evaluated on 12 previously unseen test videos, including four cases that required additional coagulation during SLE. Results The results of the test video validation indicated that 109 vessels on the ulcer required cauterization. Of these, 80 vessels (73.4%) were correctly determined as not requiring additional treatment. However, 25 vessels (22.9%) that did not require PEC were overestimated as requiring it. In the four videos that required additional coagulation in SLE, the AI was able to detect all bleeding vessels. Conclusions The effectiveness and safety of this endoscopic treatment-assisted AI system that identifies visible vessels requiring PEC should be confirmed in future studies.}, } @article {pmid38625954, year = {2024}, author = {Frimpong, T and Hayfron Acquah, JB and Missah, YM and Dawson, JK and Ayawli, BBK and Baah, P and Sam, SA}, title = {Securing cloud data using secret key 4 optimization algorithm (SK4OA) with a non-linearity run time trend.}, journal = {PloS one}, volume = {19}, number = {4}, pages = {e0301760}, pmid = {38625954}, issn = {1932-6203}, mesh = {*Algorithms ; *Information Storage and Retrieval ; Cloud Computing ; Computer Security ; Microcomputers ; }, abstract = {Cloud computing refers to the on-demand availability of computer system resources, primarily data storage and processing power, without direct active management by the user. Cloud computing has developed dramatically among many organizations due to its benefits such as cost savings, resource pooling, broad network access, and ease of management; nonetheless, security has been a major concern.
Researchers have proposed several cryptographic methods to provide cloud data security; however, their execution times grow linearly with data size and are long. A Security Key 4 Optimization Algorithm (SK4OA) with a non-linear run time is proposed in this paper. The secret key of SK4OA, rather than the size of the data, determines the run time; as such, the algorithm is able to transmit large volumes of data with minimal bandwidth and to resist security attacks like brute force, since its execution timings are unpredictable. A data set from Kaggle was used to determine the algorithm's mean and standard deviation after thirty (30) executions. Data sizes of 3 KB, 5 KB, 8 KB, 12 KB, and 16 KB were used in this study. An empirical analysis was performed against RC4, Salsa20, and ChaCha20 based on encryption time, decryption time, throughput, and memory utilization. The analysis showed that SK4OA generated the lowest mean non-linear run time of 5.545±2.785 when 16 KB of data was processed. Additionally, SK4OA's standard deviation was greater, indicating that the observed data varied far from the mean. However, RC4, Salsa20, and ChaCha20 showed smaller standard deviations, making their run times more clustered around the mean and therefore predictable.}, } @article {pmid38610575, year = {2024}, author = {Ocampo, AF and Fida, MR and Elmokashfi, A and Bryhni, H}, title = {Assessing the Cloud-RAN in the Linux Kernel: Sharing Computing and Network Resources.}, journal = {Sensors (Basel, Switzerland)}, volume = {24}, number = {7}, pages = {}, pmid = {38610575}, issn = {1424-8220}, abstract = {Cloud-based Radio Access Network (Cloud-RAN) leverages virtualization to enable the coexistence of multiple virtual Base Band Units (vBBUs) with collocated workloads on a single edge computer, aiming for economic and operational efficiency. However, this coexistence can cause performance degradation in vBBUs due to resource contention. In this paper, we conduct an empirical analysis of vBBU performance on a Linux RT-Kernel, highlighting the impact of resource sharing with user-space tasks and kernel threads. Furthermore, we evaluate CPU management strategies such as CPU affinity and CPU isolation as potential solutions to these performance challenges. Our results highlight that the implementation of CPU affinity can significantly reduce throughput variability by up to 40%, decrease vBBU's NACK ratios, and reduce vBBU scheduling latency within the Linux RT-Kernel. Collectively, these findings underscore the potential of CPU management strategies to enhance vBBU performance in Cloud-RAN environments, enabling more efficient and stable network operations. The paper concludes with a discussion on the efficient realization of Cloud-RAN, elucidating the benefits of implementing proposed CPU affinity allocations.
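A Linux-only sketch of the CPU affinity strategy evaluated in the Ocampo et al. entry above: pin the current process (standing in for a vBBU worker) to dedicated cores so collocated workloads cannot share them. The core IDs are illustrative, and the sketch assumes a host with at least four CPUs.

```python
# Pin this process to dedicated cores via CPU affinity (Linux only).
import os

print("allowed CPUs before:", sorted(os.sched_getaffinity(0)))

# Reserve cores 2 and 3 for this process. On a real Cloud-RAN host the chosen
# cores would additionally be isolated from the scheduler via kernel boot
# parameters (e.g., isolcpus), which cannot be done from Python.
os.sched_setaffinity(0, {2, 3})

print("allowed CPUs after:", sorted(os.sched_getaffinity(0)))
```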
The demonstrated enhancements, including reduced scheduling latency and improved end-to-end throughput, affirm the practicality and efficacy of the proposed strategies for optimizing Cloud-RAN deployments.}, } @article {pmid38610476, year = {2024}, author = {Liang, YP and Chang, CM and Chung, CC}, title = {Implementation of Lightweight Convolutional Neural Networks with an Early Exit Mechanism Utilizing 40 nm CMOS Process for Fire Detection in Unmanned Aerial Vehicles.}, journal = {Sensors (Basel, Switzerland)}, volume = {24}, number = {7}, pages = {}, pmid = {38610476}, issn = {1424-8220}, support = {MOST-111-2221- E-194-049-//Ministry of Science and Technology of Taiwan/ ; }, abstract = {The advancement of unmanned aerial vehicles (UAVs) enables early detection of numerous disasters. Efforts have been made to automate the monitoring of data from UAVs, with machine learning methods recently attracting significant interest. These solutions often face challenges with high computational costs and energy usage. Conventionally, data from UAVs are processed using cloud computing, where they are sent to the cloud for analysis. However, this method might not meet the real-time needs of disaster relief scenarios. In contrast, edge computing provides real-time processing at the site but still struggles with computational and energy efficiency issues. To overcome these obstacles and enhance resource utilization, this paper presents a convolutional neural network (CNN) model with an early exit mechanism designed for fire detection in UAVs. This model is implemented using TSMC 40 nm CMOS technology, which aids in hardware acceleration. Notably, the neural network has a modest parameter count of 11.2 k. In the hardware computation part, the CNN circuit completes fire detection in approximately 230,000 cycles. Power-gating techniques are also used to turn off inactive memory, contributing to reduced power consumption. The experimental results show that this neural network reaches a maximum accuracy of 81.49% in the hardware implementation stage. After automatic layout and routing, the CNN hardware accelerator can operate at 300 MHz, consuming 117 mW of power.}, } @article {pmid38610447, year = {2024}, author = {Gomes, B and Soares, C and Torres, JM and Karmali, K and Karmali, S and Moreira, RS and Sobral, P}, title = {An Efficient Edge Computing-Enabled Network for Used Cooking Oil Collection.}, journal = {Sensors (Basel, Switzerland)}, volume = {24}, number = {7}, pages = {}, pmid = {38610447}, issn = {1424-8220}, abstract = {In Portugal, more than 98% of domestic cooking oil is disposed of improperly every day. This prevents its recycling or reconversion into other forms of energy, and it may also become a harmful contaminant of soil and water. Driven by the utility of recycled cooking oil, and leveraging the exponential growth of ubiquitous computing approaches, we propose an IoT smart solution for domestic used cooking oil (UCO) collection bins. We call this approach SWAN, which stands for Smart Waste Accumulation Network. It is deployed and evaluated in Portugal. It consists of a countrywide network of collection bin units, available in public areas. Two metrics are considered to evaluate the system's success: (i) user engagement, and (ii) used cooking oil collection efficiency. The presented system should (i) perform under scenarios of temporary communication network failures, and (ii) be scalable to accommodate an ever-growing number of installed collection units.
Thus, we chose an approach that breaks from the traditional cloud computing paradigm: it relies on edge node infrastructure to process, store, and act upon the locally collected data, treating communication as a delay-tolerant task, i.e., an edge computing solution. We conduct a comparative analysis revealing the benefits of the edge computing-enabled collection bin vs. a cloud computing solution. The studied period considers four years of collected data. An exponential increase in the amount of used cooking oil collected is identified, with the developed solution being responsible for surpassing the national collection totals of previous years. During the same period, we also improved the collection process as we were able to more accurately estimate the optimal collection and system maintenance intervals.}, } @article {pmid38610327, year = {2024}, author = {Armijo, A and Zamora-Sánchez, D}, title = {Integration of Railway Bridge Structural Health Monitoring into the Internet of Things with a Digital Twin: A Case Study.}, journal = {Sensors (Basel, Switzerland)}, volume = {24}, number = {7}, pages = {}, pmid = {38610327}, issn = {1424-8220}, support = {ZL-2020/00902//Basque Government/ ; GA10112353//European Commission/ ; GA10108395//European Commission/ ; }, abstract = {Structural health monitoring (SHM) is critical for ensuring the safety of infrastructure such as bridges. This article presents a digital twin solution for the SHM of railway bridges using low-cost wireless accelerometers and machine learning (ML). The system architecture combines on-premises edge computing and cloud analytics to enable efficient real-time monitoring and complete storage of relevant time-history datasets. After train crossings, the accelerometers stream raw vibration data, which are processed in the frequency domain and analyzed using machine learning to detect anomalies that indicate potential structural issues. The digital twin approach is demonstrated on an in-service railway bridge for which vibration data were collected over two years under normal operating conditions. By learning allowable ranges for vibration patterns, the digital twin model identifies abnormal spectral peaks that indicate potential changes in structural integrity. The long-term pilot proves that this affordable SHM system can provide automated and real-time warnings of bridge damage and also supports the use of in-house-designed sensors with lower cost and edge computing capabilities such as those used in the demonstration. The successful on-premises-cloud hybrid implementation provides a cost effective and scalable model for expanding monitoring to thousands of railway bridges, democratizing SHM to improve safety by avoiding catastrophic failures.}, } @article {pmid38610235, year = {2024}, author = {Gaffurini, M and Flammini, A and Ferrari, P and Fernandes Carvalho, D and Godoy, EP and Sisinni, E}, title = {End-to-End Emulation of LoRaWAN Architecture and Infrastructure in Complex Smart City Scenarios Exploiting Containers.}, journal = {Sensors (Basel, Switzerland)}, volume = {24}, number = {7}, pages = {}, pmid = {38610235}, issn = {1424-8220}, support = {1033 17/06/2022, CN00000023//European Union/ ; }, abstract = {In a LoRaWAN network, the backend is generally distributed as Software as a Service (SaaS) based on container technology, and recently, a containerized version of the LoRaWAN node stack is also available.
Exploiting the disaggregation of LoRaWAN components, this paper focuses on the emulation of complex end-to-end architecture and infrastructures for smart city scenarios, leveraging lightweight virtualization technology. The fundamental metrics to gain insights and evaluate the scaling complexity of the emulated scenario are defined. Then, the methodology is applied to use cases taken from a real LoRaWAN application in a smart city with hundreds of nodes. As a result, the proposed approach based on containers allows for the following: (i) deployments of functionalities on diverse distributed hosts; (ii) the use of the very same SW running on real nodes; (iii) the simple configuration and management of the emulation process; (iv) affordable costs. Both on-premises and cloud servers are considered as emulation platforms to evaluate the resource request and emulation cost of the proposed approach. For instance, emulating one hour of an entire LoRaWAN network with hundreds of nodes requires very affordable hardware that, if realized with a cloud-based computing platform, may cost less than USD 1.}, } @article {pmid38609681, year = {2024}, author = {Gupta, P and Shukla, DP}, title = {Demi-decadal land use land cover change analysis of Mizoram, India, with topographic correction using machine learning algorithm.}, journal = {Environmental science and pollution research international}, volume = {31}, number = {21}, pages = {30569-30591}, pmid = {38609681}, issn = {1614-7499}, mesh = {India ; *Machine Learning ; *Algorithms ; Agriculture ; Forests ; Support Vector Machine ; Conservation of Natural Resources ; }, abstract = {Mizoram (India) is part of UNESCO's biodiversity hotspots in India and is primarily populated by tribes who engage in shifting agriculture. Hence, the land use land cover (LULC) pattern of the state is frequently changing. We have used Landsat 5 and 8 satellite images to prepare LULC maps from 2000 to 2020 in every 5 years. The atmospherically corrected images were pre-processed for removal of cloud cover and then classified into six classes: waterbodies, farmland, settlement, open forest, dense forest, and bare land. We applied four machine learning (ML) algorithms for classification, namely, random forest (RF), classification and regression tree (CART), minimum distance (MD), and support vector machine (SVM) for the images from 2000 to 2020. With 80% training and 20% testing data, we found that the RF classifier works best, with higher accuracy than the other classifiers. The average overall accuracy (OA) and Kappa coefficient (KC) from 2000 to 2020 were 84.00% and 0.79 when the RF classifier was used. When using SVM, CART, and MD, the average OA and KC were 78.06%, 0.73; 78.60%, 0.72; and 73.32%, 0.65, respectively. We utilised three methods of topographic correction, namely, C-correction, SCS (sun canopy sensor) correction, and SCS + C correction to reduce the misclassification due to shadow effects. SCS + C correction worked best for this region; hence, we prepared LULC maps on SCS + C corrected satellite images. Hence, we used the RF classifier to prepare demi-decadal LULC maps from 2000 to 2020. The OA for 2000, 2005, 2010, 2015, and 2020 was found to be 84%, 81%, 81%, 85%, and 89%, respectively, using RF. The dense forest decreased from 2000 to 2020 with an increase in open forest, settlement, and agriculture; nevertheless, when farmland was low, there was an increase in bare land.
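A minimal sketch of the random-forest LULC classification step in the Gupta and Shukla entry above, assuming scikit-learn and an 80/20 split with overall accuracy and Cohen's kappa as metrics. The synthetic per-pixel "band" features and labels are placeholders for real Landsat training samples.

```python
# Random-forest LULC classification with OA and kappa (cf. Gupta & Shukla).
import numpy as np
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score, cohen_kappa_score
from sklearn.model_selection import train_test_split

rng = np.random.default_rng(42)
n_pixels, n_bands, n_classes = 20_000, 7, 6     # six LULC classes, as above
X = rng.normal(size=(n_pixels, n_bands))        # stand-in spectral features
y = rng.integers(0, n_classes, n_pixels)        # stand-in training labels

X_tr, X_te, y_tr, y_te = train_test_split(X, y, test_size=0.2, random_state=42)
rf = RandomForestClassifier(n_estimators=200, random_state=42).fit(X_tr, y_tr)

pred = rf.predict(X_te)
print(f"OA = {accuracy_score(y_te, pred):.2%}, "
      f"kappa = {cohen_kappa_score(y_te, pred):.2f}")
```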
The results were significantly improved with the topographic correction, and misclassification was considerably reduced.}, } @article {pmid38609409, year = {2024}, author = {Zhang, Y and Geng, H and Su, L and He, S and Lu, L}, title = {An efficient polynomial-based verifiable computation scheme on multi-source outsourced data.}, journal = {Scientific reports}, volume = {14}, number = {1}, pages = {8512}, pmid = {38609409}, issn = {2045-2322}, support = {U22B2038//Research and Verification of Key Technologies for Secure and Efficient Federated Learning/ ; }, abstract = {With the development of cloud computing, users are more inclined to outsource complex computing tasks to cloud servers with strong computing capacity, and the cloud returns the final calculation results. However, the cloud is not completely trustworthy: it may leak user data and even deliberately return incorrect results. Therefore, it is important to verify the results of computing tasks without compromising the privacy of the users. Among computing tasks, polynomial calculation is widely used in information security, linear algebra, signal processing, and other fields. Most existing polynomial-based verifiable computation schemes require that the input of the polynomial function must come from a single data source, which means that the data must be signed by a single user. However, the input of the polynomial may come from multiple users in practical applications. In order to solve this problem, researchers have proposed some schemes for multi-source outsourced data, but these schemes share the common problem of low efficiency. To improve efficiency, this paper proposes an efficient polynomial-based verifiable computation scheme on multi-source outsourced data. We optimize the polynomials using Horner's method to increase the speed of verification, in which the addition gate and the multiplication gate can be interleaved to represent the polynomial function. In order to adapt to this structure, we design the corresponding homomorphic verification tag, so that the input of the polynomial can come from multiple data sources. We prove the correctness and rationality of the scheme, and carry out numerical analysis and evaluation to verify its efficiency. The experimental results indicate that data contributors can sign 1000 new data items in merely 2 s, while verifying a delegated polynomial function of degree 100 requires only 18 ms. These results confirm that the proposed scheme outperforms existing schemes.}, } @article {pmid38606391, year = {2024}, author = {Li, S and Nair, R and Naqvi, SM}, title = {Acoustic and Text Features Analysis for Adult ADHD Screening: A Data-Driven Approach Utilizing DIVA Interview.}, journal = {IEEE journal of translational engineering in health and medicine}, volume = {12}, number = {}, pages = {359-370}, pmid = {38606391}, issn = {2168-2372}, mesh = {Adult ; Humans ; *Attention Deficit Disorder with Hyperactivity/diagnosis ; Treatment Outcome ; Magnetic Resonance Imaging ; }, abstract = {Attention Deficit Hyperactivity Disorder (ADHD) is a neurodevelopmental disorder, commonly seen in childhood, that leads to behavioural changes in social development and communication patterns; it often continues undiagnosed into adulthood due to a global shortage of psychiatrists, resulting in delayed diagnoses with lasting consequences for individuals' well-being and for society.
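Returning to the Zhang et al. entry above: Horner's method rewrites a degree-n polynomial so that it is evaluated with n interleaved multiply and add "gates", which is what speeds up their verification. A plain-Python sketch of the evaluation itself follows; the paper's homomorphic verification tags are cryptographic constructions and are not reproduced here.

```python
# Horner's method: evaluate a_n*x^n + ... + a_1*x + a_0 with n multiplies.
def horner(coeffs, x):
    """coeffs = [a_n, ..., a_1, a_0], highest-degree coefficient first."""
    acc = 0
    for a in coeffs:        # one multiplication gate, one addition gate per step
        acc = acc * x + a
    return acc

# p(x) = 2x^3 - 6x^2 + 2x - 1 at x = 3: ((2*3 - 6)*3 + 2)*3 - 1 = 5
print(horner([2, -6, 2, -1], 3))   # -> 5
```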
Recently, machine learning methodologies have been incorporated into healthcare systems to facilitate the diagnosis and enhance the potential prediction of treatment outcomes for mental health conditions. In ADHD detection, previous research focused on utilizing functional magnetic resonance imaging (fMRI) or electroencephalography (EEG) signals, which require costly equipment and trained personnel for data collection. In recent years, speech and text modalities have garnered increasing attention due to their cost-effectiveness and the non-wearable nature of their data collection. In this research, conducted in collaboration with the Cumbria, Northumberland, Tyne and Wear NHS Foundation Trust, we gathered audio data from both ADHD patients and normal controls based on the clinically popular Diagnostic Interview for ADHD in adults (DIVA). Subsequently, we transformed the speech data into text modalities through the utilization of the Google Cloud Speech API. We extracted both acoustic and text features from the data, encompassing traditional acoustic features (e.g., MFCC), specialized feature sets (e.g., eGeMAPS), as well as deep-learned linguistic and semantic features derived from pre-trained deep learning models. These features are employed in conjunction with a support vector machine for ADHD classification, yielding promising outcomes in the utilization of audio and text data for effective adult ADHD screening. Clinical impact: This research introduces a transformative approach in ADHD diagnosis, employing speech and text analysis to facilitate early and more accessible detection, particularly beneficial in areas with limited psychiatric resources. Clinical and Translational Impact Statement: The successful application of machine learning techniques in analyzing audio and text data for ADHD screening represents a significant advancement in mental health diagnostics, paving the way for its integration into clinical settings and potentially improving patient outcomes on a broader scale.}, } @article {pmid38601602, year = {2024}, author = {Sachdeva, S and Bhatia, S and Al Harrasi, A and Shah, YA and Anwer, K and Philip, AK and Shah, SFA and Khan, A and Ahsan Halim, S}, title = {Unraveling the role of cloud computing in health care system and biomedical sciences.}, journal = {Heliyon}, volume = {10}, number = {7}, pages = {e29044}, pmid = {38601602}, issn = {2405-8440}, abstract = {Cloud computing has emerged as a transformative force in healthcare and biomedical sciences, offering scalable, on-demand resources for managing vast amounts of data. This review explores the integration of cloud computing within these fields, highlighting its pivotal role in enhancing data management, security, and accessibility. We examine the application of cloud computing in various healthcare domains, including electronic medical records, telemedicine, and personalized patient care, as well as its impact on bioinformatics research, particularly in genomics, proteomics, and metabolomics. The review also addresses the challenges and ethical considerations associated with cloud-based healthcare solutions, such as data privacy and cybersecurity.
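A minimal sketch of the two feature paths in the Li et al. entry above: cloud transcription of interview audio with the Google Cloud Speech API (text modality) and MFCC extraction with librosa (acoustic modality). The file name and recognition config are assumptions, and Google Cloud credentials must be configured in the environment.

```python
# Speech-to-text plus MFCC features for ADHD screening (cf. Li et al.).
import librosa
from google.cloud import speech

WAV = "diva_interview_segment.wav"   # hypothetical 16 kHz mono recording

# Text modality: cloud transcription
client = speech.SpeechClient()
with open(WAV, "rb") as f:
    audio = speech.RecognitionAudio(content=f.read())
config = speech.RecognitionConfig(
    encoding=speech.RecognitionConfig.AudioEncoding.LINEAR16,
    sample_rate_hertz=16000,
    language_code="en-GB",
)
response = client.recognize(config=config, audio=audio)
transcript = " ".join(r.alternatives[0].transcript for r in response.results)

# Acoustic modality: 13 MFCCs averaged over the segment
y, sr = librosa.load(WAV, sr=16000)
mfcc = librosa.feature.mfcc(y=y, sr=sr, n_mfcc=13).mean(axis=1)

print(transcript[:80], mfcc.shape)   # both feed a downstream SVM classifier
```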
By providing a comprehensive overview, we aim to assist readers in understanding the significance of cloud computing in modern medical applications and its potential to revolutionize both patient care and biomedical research.}, } @article {pmid38591672, year = {2024}, author = {Hicks, CB and Martinez, TJ}, title = {Massively scalable workflows for quantum chemistry: BigChem and ChemCloud.}, journal = {The Journal of chemical physics}, volume = {160}, number = {14}, pages = {}, doi = {10.1063/5.0190834}, pmid = {38591672}, issn = {1089-7690}, abstract = {Electronic structure theory, i.e., quantum chemistry, is the fundamental building block for many problems in computational chemistry. We present a new distributed computing framework (BigChem), which allows for an efficient solution of many quantum chemistry problems in parallel. BigChem is designed to be easily composable and leverages industry-standard middleware (e.g., Celery, RabbitMQ, and Redis) for distributed approaches to large scale problems. BigChem can harness any collection of worker nodes, including ones on cloud providers (such as AWS or Azure), local clusters, or supercomputer centers (and any mixture of these). BigChem builds upon MolSSI packages, such as QCEngine to standardize the operation of numerous computational chemistry programs, demonstrated here with Psi4, xtb, geomeTRIC, and TeraChem. BigChem delivers full utilization of compute resources at scale, offers a programmable canvas for designing sophisticated quantum chemistry workflows, and is fault tolerant to node failures and network disruptions. We demonstrate linear scalability of BigChem running computational chemistry workloads on up to 125 GPUs. Finally, we present ChemCloud, a web API to BigChem and successor to TeraChem Cloud. ChemCloud delivers scalable and secure access to BigChem over the Internet.}, } @article {pmid38589881, year = {2024}, author = {Holl, F and Clarke, L and Raffort, T and Serres, E and Archer, L and Saaristo, P}, title = {The Red Cross Red Crescent Health Information System (RCHIS): an electronic medical records and health information management system for the red cross red crescent emergency response units.}, journal = {Conflict and health}, volume = {18}, number = {1}, pages = {28}, pmid = {38589881}, issn = {1752-1505}, support = {001/WHO_/World Health Organization/International ; }, abstract = {BACKGROUND: The Red Cross and Red Crescent Movement (RCRC) utilizes specialized Emergency Response Units (ERUs) for international disaster response. However, data collection and reporting within ERUs have been time-consuming and paper-based. The Red Cross Red Crescent Health Information System (RCHIS) was developed to improve clinical documentation and reporting, ensuring accuracy and ease of use while increasing compliance with reporting standards.
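The BigChem framework in the Hicks and Martinez entry above builds on Celery with a RabbitMQ broker and Redis result backend; a generic Celery pattern of that kind might look as follows. The task body is a stand-in, not BigChem's code: a real worker would dispatch QCEngine jobs to programs such as Psi4 or TeraChem.

```python
# Generic Celery fan-out pattern of the kind BigChem builds on.
from celery import Celery, group

app = Celery("bigchem_sketch",
             broker="amqp://localhost",      # RabbitMQ
             backend="redis://localhost")    # Redis stores results

@app.task
def compute_energy(molecule_xyz: str, method: str = "b3lyp") -> float:
    """Placeholder worker task; a real worker would call QCEngine here."""
    return 0.0  # hypothetical energy in hartree

if __name__ == "__main__":
    # Fan 1000 single-point calculations out across every connected worker,
    # whether it runs on a cloud provider, a local cluster, or a supercomputer.
    job = group(compute_energy.s(f"mol_{i}.xyz") for i in range(1000))()
    energies = job.get(timeout=3600)
    print(len(energies), "results collected")
```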

CASE PRESENTATION: RCHIS is an Electronic Medical Record (EMR) and Health Information System (HIS) designed for RCRC ERUs. It can be accessed on Android tablets or Windows laptops, both online and offline. The system securely stores data on the Microsoft Azure cloud, with synchronization facilitated through a local ERU server. The functional architecture covers all clinical functions of ERU clinics and hospitals, incorporating user-friendly features. A pilot study was conducted with the Portuguese Red Cross (PRC) during a large-scale event. Thirteen super users were trained and subsequently trained the staff. During the four-day pilot, 77 user accounts were created, and 243 patient files were documented. Feedback indicated that RCHIS was easy to use, required minimal training time, and that the training provided was sufficient for full utilization. Real-time reporting facilitated coordination with the civil defense authority.

CONCLUSIONS: The development and pilot use of RCHIS demonstrated its feasibility and efficacy within RCRC ERUs. The system addressed the need for an EMR and HIS solution, enabling comprehensive clinical documentation and supporting administrative reporting functions. The pilot study validated the training-of-trainers approach and paved the way for further domestic use of RCHIS. RCHIS has the potential to improve patient safety, quality of care, and reporting efficiency within ERUs. Automated reporting reduces the burden on ERU leadership, while electronic compilation enhances record completeness and correctness. Ongoing feedback collection and feature development continue to enhance RCHIS's functionality. Further training sessions took place in 2023 and preparations for international deployments are under way. RCHIS represents a significant step toward improved emergency medical care and coordination within the RCRC and has implications for similar systems in other Emergency Medical Teams.}, } @article {pmid38586319, year = {2024}, author = {Chen, A and Yu, S and Yang, X and Huang, D and Ren, Y}, title = {IoT data security in outsourced databases: A survey of verifiable database.}, journal = {Heliyon}, volume = {10}, number = {7}, pages = {e28117}, pmid = {38586319}, issn = {2405-8440}, abstract = {With the swift advancement of cloud computing and the Internet of Things (IoT), to address the issue of massive data storage, IoT devices opt to offload their data to cloud servers so as to alleviate the pressure of resident storage and computation. However, storing local data in an outsourced database is bound to face the danger of tampering. To handle this problem, the verifiable database (VDB), first proposed in 2011, has garnered sustained interest from researchers. The concept of VDB enables resource-limited clients to securely outsource extremely large databases to untrusted servers, where users can retrieve database records and modify them by allocating new values, and any attempts at tampering will be detected. This paper provides a systematic summary of VDB. First, a definition of VDB is given, along with correctness and security proofs. VDB schemes based on commitment constructions are then introduced, divided mainly into vector commitments and polynomial commitments. Next, VDB schemes based on delegated polynomial functions are introduced, mainly in combination with Merkle trees and forward-secure symmetric searchable encryption. We then classify the current VDB schemes relying on four different assumptions. In addition, we classify the established VDB schemes built upon two different groups. Finally, we introduce the applications and future development of VDB.
To our knowledge, this is the first VDB review paper to date.}, } @article {pmid38585837, year = {2024}, author = {Mimar, S and Paul, AS and Lucarelli, N and Border, S and Santo, BA and Naglah, A and Barisoni, L and Hodgin, J and Rosenberg, AZ and Clapp, W and Sarder, P and , }, title = {ComPRePS: An Automated Cloud-based Image Analysis tool to democratize AI in Digital Pathology.}, journal = {bioRxiv : the preprint server for biology}, volume = {}, number = {}, pages = {}, pmid = {38585837}, issn = {2692-8205}, support = {R01 DK118431/DK/NIDDK NIH HHS/United States ; R21 DK128668/DK/NIDDK NIH HHS/United States ; R01 DK114485/DK/NIDDK NIH HHS/United States ; U01 DK133090/DK/NIDDK NIH HHS/United States ; R01 DK129541/DK/NIDDK NIH HHS/United States ; OT2 OD033753/OD/NIH HHS/United States ; }, abstract = {Artificial intelligence (AI) has extensive applications in a wide range of disciplines including healthcare and clinical practice. Advances in high-resolution whole-slide brightfield microscopy allow for the digitization of histologically stained tissue sections, producing gigapixel-scale whole-slide images (WSI). The significant improvement in computing and revolution of deep neural network (DNN)-based AI technologies over the last decade allow us to integrate massively parallelized computational power, cutting-edge AI algorithms, and big data storage, management, and processing. Applied to WSIs, AI has created opportunities for improved disease diagnostics and prognostics with the ultimate goal of enhancing precision medicine and resulting patient care. The National Institutes of Health (NIH) has recognized the importance of developing standardized principles for data management and discovery for the advancement of science and proposed the Findable, Accessible, Interoperable, Reusable, (FAIR) Data Principles[1] with the goal of building a modernized biomedical data resource ecosystem to establish collaborative research communities. In line with this mission and to democratize AI-based image analysis in digital pathology, we propose ComPRePS: an end-to-end automated Computational Renal Pathology Suite which combines massive scalability, on-demand cloud computing, and an easy-to-use web-based user interface for data upload, storage, management, slide-level visualization, and domain expert interaction. Moreover, our platform is equipped with both in-house and collaborator developed sophisticated AI algorithms in the back-end server for image analysis to identify clinically relevant micro-anatomic functional tissue units (FTU) and to extract image features.}, } @article {pmid38584872, year = {2024}, author = {Copeland, CJ and Roddy, JW and Schmidt, AK and Secor, PR and Wheeler, TJ}, title = {VIBES: a workflow for annotating and visualizing viral sequences integrated into bacterial genomes.}, journal = {NAR genomics and bioinformatics}, volume = {6}, number = {2}, pages = {lqae030}, pmid = {38584872}, issn = {2631-9268}, support = {R01 AI138981/AI/NIAID NIH HHS/United States ; R01 GM132600/GM/NIGMS NIH HHS/United States ; }, abstract = {Bacteriophages are viruses that infect bacteria. Many bacteriophages integrate their genomes into the bacterial chromosome and become prophages. Prophages may substantially burden or benefit host bacteria fitness, acting in some cases as parasites and in others as mutualists. Some prophages have been demonstrated to increase host virulence. 
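The tamper-evidence idea running through the VDB survey above is easiest to see with the simplest commitment structure it mentions, a Merkle tree: the client retains only the root hash, and any record returned by the untrusted server can be verified against it. A toy sketch (real VDB schemes add efficient updates, hiding, and stronger primitives):

```python
import hashlib

def h(b: bytes) -> bytes:
    return hashlib.sha256(b).digest()

def levels(leaves):
    """All tree levels, bottom-up; the last node is duplicated on odd levels."""
    cur, out = [h(x) for x in leaves], []
    while True:
        out.append(cur)
        if len(cur) == 1:
            return out
        if len(cur) % 2:
            cur = cur + [cur[-1]]
        cur = [h(cur[i] + cur[i + 1]) for i in range(0, len(cur), 2)]

def proof(leaves, idx):
    """Sibling hashes from leaf idx up to the root."""
    path = []
    for level in levels(leaves)[:-1]:
        if len(level) % 2:
            level = level + [level[-1]]
        sib = idx ^ 1
        path.append((level[sib], sib < idx))
        idx //= 2
    return path

def verify(root, record, path):
    node = h(record)
    for sib, sib_is_left in path:
        node = h(sib + node) if sib_is_left else h(node + sib)
    return node == root

rows = [b"row0", b"row1", b"row2", b"row3"]
root = levels(rows)[-1][0]                    # the client's only stored state
assert verify(root, b"row2", proof(rows, 2))  # honest server passes
assert not verify(root, b"tampered", proof(rows, 2))
```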
The increasing ease of bacterial genome sequencing provides an opportunity to deeply explore prophage prevalence and insertion sites. Here we present VIBES (Viral Integrations in Bacterial genomES), a workflow intended to automate prophage annotation in complete bacterial genome sequences. VIBES provides additional context to prophage annotations by annotating bacterial genes and viral proteins in user-provided bacterial and viral genomes. The VIBES pipeline is implemented as a Nextflow-driven workflow, providing a simple, unified interface for execution on local, cluster, and cloud computing environments. For each step of the pipeline, a container including all necessary software dependencies is provided. VIBES produces results in simple tab-separated format and generates intuitive and interactive visualizations for data exploration. Despite VIBES's primary emphasis on prophage annotation, its generic alignment-based design allows it to be deployed as a general-purpose sequence similarity search manager. We demonstrate the utility of the VIBES prophage annotation workflow by searching for 178 Pf phage genomes across 1072 Pseudomonas spp. genomes.}, } @article {pmid38578775, year = {2024}, author = {Nawaz Tareen, F and Alvi, AN and Alsamani, B and Alkhathami, M and Alsadie, D and Alosaimi, N}, title = {EOTE-FSC: An efficient offloaded task execution for fog enabled smart cities.}, journal = {PloS one}, volume = {19}, number = {4}, pages = {e0298363}, pmid = {38578775}, issn = {1932-6203}, mesh = {Cities ; *Algorithms ; *Communication ; Health Facilities ; Information Science ; }, abstract = {Smart cities ease the lifestyle of their community members with the help of Information and Communication Technology (ICT). They provide better water, waste, and energy management, enhance the security and safety of citizens, and offer better health facilities. Most of these applications are based on IoT sensor networks that are deployed in different application areas according to demand. Due to limited processing capabilities, sensor nodes cannot process multiple tasks simultaneously and need to offload some of their tasks to remotely placed cloud servers, which may cause delays. To reduce this delay, computing nodes placed in different vicinities act as fog-computing nodes and execute the offloaded tasks. It has been observed that offloaded tasks are not received uniformly by fog computing nodes: some fog nodes receive more tasks, while others receive fewer. This may increase the overall task execution time. Furthermore, these tasks comprise different priority levels and must be executed before their deadline. In this work, an Efficient Offloaded Task Execution for Fog enabled Smart cities (EOTE - FSC) is proposed. EOTE - FSC proposes a load balancing mechanism by modifying the greedy algorithm to efficiently distribute the offloaded tasks to the attached fog nodes and reduce the overall task execution time. This results in the successful execution of most of the tasks within their deadline. In addition, EOTE - FSC modifies the task sequencing with a deadline algorithm for the fog node to optimally execute the offloaded tasks in such a way that most of the high-priority tasks are accommodated. The load balancing results of EOTE - FSC are compared with the state-of-the-art well-known Round Robin, Greedy, Round Robin with longest job first, and Round Robin with shortest job first algorithms.
The fog computing results of EOTE - FSC, in turn, are compared with the First Come First Serve (FCFS) algorithm. The results show that EOTE - FSC effectively distributes the offloaded tasks across fog nodes, and the maximum load on the fog computing nodes is reduced by up to 29%, 27.3%, 23%, and 24.4% as compared to the Round Robin, Greedy, Round Robin with LJF, and Round Robin with SJF algorithms, respectively. Moreover, the proposed EOTE - FSC executes more offloaded high-priority tasks than the FCFS algorithm within the same computing capacity of fog nodes.}, } @article {pmid38568312, year = {2024}, author = {Khan, NS and Roy, SK and Talukdar, S and Billah, M and Iqbal, A and Zzaman, RU and Chowdhury, A and Mahtab, SB and Mallick, J}, title = {Empowering real-time flood impact assessment through the integration of machine learning and Google Earth Engine: a comprehensive approach.}, journal = {Environmental science and pollution research international}, volume = {31}, number = {41}, pages = {53877-53892}, pmid = {38568312}, issn = {1614-7499}, mesh = {*Floods ; *Machine Learning ; Bangladesh ; Support Vector Machine ; Neural Networks, Computer ; Humans ; }, abstract = {Floods cause substantial losses to life and property, especially in flood-prone regions like northwestern Bangladesh. Timely and precise evaluation of flood impacts is critical for effective flood management and decision-making. This research demonstrates an integrated approach utilizing machine learning and Google Earth Engine to enable real-time flood assessment. Synthetic aperture radar (SAR) data from Sentinel-1 and the Google Earth Engine platform were employed to generate near real-time flood maps of the 2020 flood in Kurigram and Lalmonirhat. An automatic thresholding technique quantified flooded areas. For land use/land cover (LULC) analysis, Sentinel-2's high resolution and machine learning models like artificial neural networks (ANN), random forests (RF) and support vector machines (SVM) were leveraged. ANN delivered the best LULC mapping with 0.94 accuracy based on metrics like accuracy, kappa, mean F1 score, mean sensitivity, mean specificity, mean positive predictive value, mean negative predictive value, mean precision, mean recall, mean detection rate and mean balanced accuracy. Results showed over 600,000 people exposed at peak inundation in July, about 17% of the population. The machine learning-enabled LULC maps reliably identified vulnerable areas to prioritize flood management. Over half of croplands flooded in July. This research demonstrates the potential of integrating SAR, machine learning and cloud computing to empower authorities through real-time monitoring and accurate LULC mapping essential for effective flood response. The proposed comprehensive methodology can assist stakeholders in developing data-driven flood management strategies to reduce impacts.}, } @article {pmid38560228, year = {2024}, author = {Gheni, HM and AbdulRahaim, LA and Abdellatif, A}, title = {Real-time driver identification in IoV: A deep learning and cloud integration approach.}, journal = {Heliyon}, volume = {10}, number = {7}, pages = {e28109}, pmid = {38560228}, issn = {2405-8440}, abstract = {The Internet of Vehicles (IoV) emerges as a pivotal extension of the Internet of Things (IoT), specifically geared towards transforming the automotive landscape. In this evolving ecosystem, the demand for a seamless end-to-end system becomes paramount for enhancing operational efficiency and safety.
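The two ingredients of EOTE - FSC as described above (greedy least-loaded placement of offloaded tasks, then priority- and deadline-aware sequencing on each fog node) can be sketched compactly. This is one reading of the abstract, not the authors' exact algorithm; the task tuples and tie-breaking rules are illustrative.

```python
import heapq

def assign_tasks(tasks, n_nodes):
    """Greedy load balancing: tasks are (exec_time, priority, deadline)."""
    load = [(0.0, i) for i in range(n_nodes)]      # (accumulated load, node id)
    heapq.heapify(load)
    placement = {i: [] for i in range(n_nodes)}
    for t in sorted(tasks, key=lambda t: -t[0]):   # place longest tasks first
        l, node = heapq.heappop(load)              # least-loaded node so far
        placement[node].append(t)
        heapq.heappush(load, (l + t[0], node))
    return placement

def sequence(node_tasks):
    """Run high-priority tasks first; earliest deadline breaks ties."""
    return sorted(node_tasks, key=lambda t: (-t[1], t[2]))

tasks = [(4, 2, 10), (2, 1, 6), (7, 3, 12), (1, 3, 4), (3, 2, 9)]
for node, ts in assign_tasks(tasks, 2).items():
    print(node, sequence(ts))
```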
Hence, this study introduces an innovative method for real-time driver identification by integrating cloud computing with deep learning. Utilizing the integrated capabilities of Google Cloud, Thingsboard, and Apache Kafka, the developed solution tailored for IoV technology is adept at managing real-time data collection, processing, prediction, and visualization, with resilience against sensor data anomalies. The proposed approach identifies drivers using a combination of Convolutional Neural Networks (CNN) and multi-head self-attention. The proposed model is validated on two datasets: a public Security dataset and a self-collected one. Moreover, the results show that the proposed model surpassed previous works by achieving an accuracy and F1 score of 99.95%. Even when challenged with data anomalies, this model maintains a high accuracy of 96.2%. By achieving accurate driver identification results, the proposed end-to-end IoV system can aid in optimizing fleet management, vehicle security, personalized driving experiences, insurance, and risk assessment. This emphasizes its potential for road safety and managing transportation more effectively.}, } @article {pmid38559152, year = {2024}, author = {Li, Y and Xue, F and Li, B and Yang, Y and Fan, Z and Shu, J and Yang, X and Wang, X and Lin, J and Copana, C and Zhao, B}, title = {Analyzing bivariate cross-trait genetic architecture in GWAS summary statistics with the BIGA cloud computing platform.}, journal = {bioRxiv : the preprint server for biology}, volume = {}, number = {}, pages = {}, pmid = {38559152}, issn = {2692-8205}, abstract = {As large-scale biobanks provide increasing access to deep phenotyping and genomic data, genome-wide association studies (GWAS) are rapidly uncovering the genetic architecture behind various complex traits and diseases. GWAS publications typically make their summary-level data (GWAS summary statistics) publicly available, enabling further exploration of genetic overlaps between phenotypes gathered from different studies and cohorts. However, systematically analyzing high-dimensional GWAS summary statistics for thousands of phenotypes can be both logistically challenging and computationally demanding. In this paper, we introduce BIGA (https://bigagwas.org/), a website that aims to offer unified data analysis pipelines and processed data resources for cross-trait genetic architecture analyses using GWAS summary statistics. We have developed a framework to implement statistical genetics tools on a cloud computing platform, combined with extensive curated GWAS data resources. Through BIGA, users can upload data, submit jobs, and share results, providing the research community with a convenient tool for consolidating GWAS data and generating new insights.}, } @article {pmid38559026, year = {2024}, author = {Marini, S and Barquero, A and Wadhwani, AA and Bian, J and Ruiz, J and Boucher, C and Prosperi, M}, title = {OCTOPUS: Disk-based, Multiplatform, Mobile-friendly Metagenomics Classifier.}, journal = {bioRxiv : the preprint server for biology}, volume = {}, number = {}, pages = {}, pmid = {38559026}, issn = {2692-8205}, support = {R01 AI141810/AI/NIAID NIH HHS/United States ; R01 AI145552/AI/NIAID NIH HHS/United States ; R01 AI170187/AI/NIAID NIH HHS/United States ; }, abstract = {Portable genomic sequencers such as Oxford Nanopore's MinION enable real-time applications in clinical and environmental health.
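The driver-identification abstract above names its model family precisely: a CNN combined with multi-head self-attention. A minimal PyTorch sketch of that combination over windows of multichannel vehicle telemetry; layer sizes, pooling, and input shape are illustrative, not the paper's architecture.

```python
import torch
import torch.nn as nn

class DriverIdNet(nn.Module):
    def __init__(self, n_channels=16, n_drivers=10, d_model=64, n_heads=4):
        super().__init__()
        self.conv = nn.Sequential(                  # local temporal features
            nn.Conv1d(n_channels, d_model, kernel_size=5, padding=2), nn.ReLU(),
            nn.Conv1d(d_model, d_model, kernel_size=5, padding=2), nn.ReLU())
        self.attn = nn.MultiheadAttention(d_model, n_heads, batch_first=True)
        self.head = nn.Linear(d_model, n_drivers)

    def forward(self, x):                  # x: (batch, channels, time)
        z = self.conv(x).transpose(1, 2)   # -> (batch, time, d_model)
        z, _ = self.attn(z, z, z)          # self-attention across time steps
        return self.head(z.mean(dim=1))    # pool over time, classify driver

logits = DriverIdNet()(torch.randn(8, 16, 128))  # 8 windows of sensor data
print(logits.shape)                              # torch.Size([8, 10])
```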
However, there is a bottleneck in downstream analytics when bioinformatics pipelines are unavailable, e.g., when cloud processing is unreachable due to the absence of an Internet connection, or when only low-end computing devices can be carried on site. Here we present platform-friendly software for portable metagenomic analysis of Nanopore data, the Oligomer-based Classifier of Taxonomic Operational and Pan-genome Units via Singletons (OCTOPUS). OCTOPUS is written in Java and reimplements several features of the popular Kraken2 and KrakenUniq software, with original components for improving metagenomics classification on incomplete/sampled reference databases, making it ideal for running on smartphones or tablets. OCTOPUS obtains sensitivity and precision comparable to Kraken2, while dramatically decreasing (4- to 16-fold) the false positive rate, and yields high correlation on real-world data. OCTOPUS is available along with customized databases at https://github.com/DataIntellSystLab/OCTOPUS and https://github.com/Ruiz-HCI-Lab/OctopusMobile.}, } @article {pmid38555378, year = {2024}, author = {Du, J and Dong, G and Ning, J and Xu, Z and Yang, R}, title = {Identity-based controlled delegated outsourcing data integrity auditing scheme.}, journal = {Scientific reports}, volume = {14}, number = {1}, pages = {7582}, pmid = {38555378}, issn = {2045-2322}, support = {2023SKY007//Yunnan Minzu University Graduate Research Innovation Fund Project/ ; 61662089//National Natural Science Foundation of China/ ; }, abstract = {With the continuous development of cloud computing, the application of cloud storage has become increasingly popular. To ensure the integrity and availability of cloud data, scholars have proposed several cloud data auditing schemes. Still, most fall short on outsourced data integrity, controlled outsourcing, and source file auditing. Therefore, we propose a controlled delegated outsourcing data integrity auditing scheme based on the identity-based encryption model. Our proposed scheme allows users to specify a dedicated agent to assist in uploading data to the cloud. These authorized proxies use recognizable identities for authentication and authorization, thus avoiding the need for cumbersome certificate management in a secure distributed computing system. While solving the above problems, our scheme adopts a bucket-based red-black tree structure to efficiently realize dynamic data updating: it performs data updates and structural rebalancing continuously and achieves highly efficient data operations. We define the security model of the scheme in detail and prove the scheme's security under the underlying hard-problem assumption.
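OCTOPUS itself is written in Java and is far more sophisticated, but the oligomer (k-mer) matching principle behind such classifiers fits in a few lines. A toy sketch with made-up reference sequences; the hit threshold stands in, loosely, for OCTOPUS's false-positive controls.

```python
from collections import Counter

def kmers(seq, k=11):
    return {seq[i:i + k] for i in range(len(seq) - k + 1)}

def build_index(references, k=11):
    """taxon -> set of k-mers drawn from its reference sequences."""
    return {taxon: set.union(*(kmers(s, k) for s in seqs))
            for taxon, seqs in references.items()}

def classify(read, index, k=11, min_hits=3):
    hits = Counter({t: len(kmers(read, k) & km) for t, km in index.items()})
    taxon, n = hits.most_common(1)[0]
    return taxon if n >= min_hits else "unclassified"  # damp false positives

index = build_index({"E. coli": ["ACGTACGTACGTTTGACG"],
                     "Pf phage": ["TTTTGGGGCCCCAAAATTT"]})
print(classify("ACGTACGTACGTTTG", index))  # -> E. coli
```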
In the performance analysis section, the proposed scheme is analyzed experimentally in comparison with other schemes, and the results show that the proposed scheme is efficient and secure.}, } @article {pmid38546988, year = {2025}, author = {Chen, X and Xu, G and Xu, X and Jiang, H and Tian, Z and Ma, T}, title = {Multicenter Hierarchical Federated Learning With Fault-Tolerance Mechanisms for Resilient Edge Computing Networks.}, journal = {IEEE transactions on neural networks and learning systems}, volume = {36}, number = {1}, pages = {47-61}, doi = {10.1109/TNNLS.2024.3362974}, pmid = {38546988}, issn = {2162-2388}, abstract = {In the realm of federated learning (FL), the conventional dual-layered architecture, comprising a central parameter server and peripheral devices, often encounters challenges due to its significant reliance on the central server for communication and security. This dependence becomes particularly problematic in scenarios involving potential malfunctions of devices and servers. While existing device-edge-cloud hierarchical FL (HFL) models alleviate some dependence on central servers and reduce communication overheads, they primarily focus on load balancing within edge computing networks and fall short of achieving complete decentralization and edge-centric model aggregation. Addressing these limitations, we introduce the multicenter HFL (MCHFL) framework. This innovative framework replaces the traditional single central server architecture with a distributed network of robust global aggregation centers located at the edge, inherently enhancing fault tolerance crucial for maintaining operational integrity amidst edge network disruptions. Our comprehensive experiments with the MNIST, FashionMNIST, and CIFAR-10 datasets demonstrate the MCHFL's superior performance. Notably, even under high paralysis ratios of up to 50%, the MCHFL maintains high accuracy levels, with maximum accuracy reductions of only 2.60%, 5.12%, and 16.73% on these datasets, respectively. This performance significantly surpasses the notable accuracy declines observed in traditional single-center models under similar conditions. To the best of our knowledge, the MCHFL is the first edge multicenter FL framework with theoretical underpinnings. Our extensive experimental results across various datasets validate the MCHFL's effectiveness, showcasing its higher accuracy, faster convergence speed, and stronger robustness compared to single-center models, thereby establishing it as a pioneering paradigm in edge multicenter FL.}, } @article {pmid38545518, year = {2024}, author = {Lock, C and Toh, EMS and Keong, NC}, title = {Structural volumetric and Periodic Table DTI patterns in Complex Normal Pressure Hydrocephalus-Toward the principles of a translational taxonomy.}, journal = {Frontiers in human neuroscience}, volume = {18}, number = {}, pages = {1188533}, pmid = {38545518}, issn = {1662-5161}, abstract = {INTRODUCTION: We previously proposed a novel taxonomic framework to describe the diffusion tensor imaging (DTI) profiles of white matter tracts by their diffusivity and neural properties. We have shown the relevance of this strategy toward interpreting brain tissue signatures in Classic Normal Pressure Hydrocephalus vs. comparator cohorts of mild traumatic brain injury and Alzheimer's disease. 
In this iteration of the Periodic Table of DTI Elements, we examined patterns of tissue distortion in Complex NPH (CoNPH) and validated the methodology against an open-access dataset of healthy subjects, to expand its accessibility to a larger community.

METHODS: DTI measures for 12 patients with CoNPH with multiple comorbidities and 45 cognitively normal controls from the ADNI database were derived using the image processing pipeline on the brainlife.io open cloud computing platform. Using the Periodic Table algorithm, DTI profiles for CoNPH vs. controls were mapped according to injury patterns.

RESULTS: Structural volumes in most structures tested were significantly lower and the lateral ventricles higher in CoNPH vs. controls. In CoNPH, significantly lower fractional anisotropy (FA) and higher mean, axial, and radial diffusivities (MD, L1, and L2 and 3, respectively) were observed in white matter related to the lateral ventricles. Most diffusivity measures across supratentorial and infratentorial structures were significantly higher in CoNPH, with the largest differences in the cerebellum cortex. In subcortical deep gray matter structures, CoNPH and controls differed most significantly in the hippocampus, with the CoNPH group having a significantly lower FA and higher MD, L1, and L2 and 3. Cerebral and cerebellar white matter demonstrated more potential reversibility of injury compared to cerebral and cerebellar cortices.
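The diffusivity measures reported in these RESULTS are fixed functions of the three diffusion-tensor eigenvalues, so the relationships are easy to verify numerically: L1 is the axial diffusivity, the mean of L2 and L3 the radial diffusivity, and FA/MD the standard combinations below (the example eigenvalues are typical textbook values, not study data).

```python
import numpy as np

def dti_measures(l1, l2, l3):
    ev = np.array([l1, l2, l3], dtype=float)
    md = ev.mean()                                   # mean diffusivity
    fa = np.sqrt(1.5 * np.sum((ev - md) ** 2) / np.sum(ev ** 2))
    return {"FA": fa, "MD": md, "AD": l1, "RD": (l2 + l3) / 2}

# Coherent white-matter tract, eigenvalues in 10^-3 mm^2/s: FA comes out ~0.71.
print(dti_measures(1.4, 0.35, 0.35))
```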

DISCUSSION: The findings of widespread and significant reductions in subcortical deep gray matter structures, in comparison to healthy controls, support the hypothesis that Complex NPH cohorts retain imaging features associated with Classic NPH. The use of the algorithm of the Periodic Table allowed for greater consistency in the interpretation of DTI results by focusing on patterns of injury rather than an over-reliance on the interrogation of individual measures by statistical significance alone. Our aim is to provide a prototype that could be refined for an approach toward the concept of a "translational taxonomy."}, } @article {pmid38544154, year = {2024}, author = {Kang, S and Lee, S and Jung, Y}, title = {Design of Network-on-Chip-Based Restricted Coulomb Energy Neural Network Accelerator on FPGA Device.}, journal = {Sensors (Basel, Switzerland)}, volume = {24}, number = {6}, pages = {}, pmid = {38544154}, issn = {1424-8220}, support = {00144288, 00144290//Ministry of Trade, Industry and Energy/ ; }, abstract = {Sensor applications in internet of things (IoT) systems, coupled with artificial intelligence (AI) technology, are becoming an increasingly significant part of modern life. For low-latency AI computation in IoT systems, there is a growing preference for edge-based computing over cloud-based alternatives. The restricted coulomb energy neural network (RCE-NN) is a machine learning algorithm well-suited for implementation on edge devices due to its simple learning and recognition scheme. In addition, because the RCE-NN generates neurons as needed, it is easy to adjust the network structure and learn additional data. Therefore, the RCE-NN can provide edge-based real-time processing for various sensor applications. However, previous RCE-NN accelerators have limited scalability when the number of neurons increases. In this paper, we propose a network-on-chip (NoC)-based RCE-NN accelerator and present the results of implementation on a field-programmable gate array (FPGA). NoC is an effective solution for managing massive interconnections. The proposed RCE-NN accelerator utilizes a hierarchical-star (H-star) topology, which efficiently handles a large number of neurons, along with routers specifically designed for the RCE-NN. These approaches result in only a slight decrease in the maximum operating frequency as the number of neurons increases. Consequently, the maximum operating frequency of the proposed RCE-NN accelerator with 512 neurons increased by 126.1% compared to a previous RCE-NN accelerator. This enhancement was verified with two datasets for gas and sign language recognition, achieving accelerations of up to 54.8% in learning time and up to 45.7% in recognition time. 
The NoC scheme of the proposed RCE-NN accelerator is an appropriate solution to ensure the scalability of the neural network while providing high-performance on-chip learning and recognition.}, } @article {pmid38544035, year = {2024}, author = {Zhan, Y and Xie, W and Shi, R and Huang, Y and Zheng, X}, title = {Dynamic Privacy-Preserving Anonymous Authentication Scheme for Condition-Matching in Fog-Cloud-Based VANETs.}, journal = {Sensors (Basel, Switzerland)}, volume = {24}, number = {6}, pages = {}, pmid = {38544035}, issn = {1424-8220}, support = {61872091//National Natural Science Foundation of China/ ; JCKY20 19102C001//National Defense Basic Research Program of China/ ; 62372110//National Natural Science Foundation of China/ ; 2023J02008//Fujian Provincial Natural Science of Foundation/ ; 2020B0101090005//Key-Area Research and Development Program of Guangdong Province/ ; YSPTZX202145//The specific research fund of The Innovation Platform for Academician of Hainan Province/ ; 2022HZ022022//Major Special Project for Industrial Science and Technology in Fujian Province/ ; 2022H0012//Industrial Guiding Project in Fujian/ ; 2022L3003//Special Project of Central Finance Guiding Local Development/ ; }, abstract = {Secure group communication in Vehicle Ad hoc Networks (VANETs) over open channels remains a challenging task. To enable secure group communications with conditional privacy, it is necessary to establish a secure session using Authenticated Key Agreement (AKA). However, existing AKAs suffer from problems such as cross-domain dynamic group session key negotiation and heavy computational burdens on the Trusted Authority (TA) and vehicles. To address these challenges, we propose a dynamic privacy-preserving anonymous authentication scheme for condition matching in fog-cloud-based VANETs. The scheme employs general Elliptic Curve Cryptosystem (ECC) technology and fog-cloud computing methods to decrease computational overhead for On-Board Units (OBUs) and supports multiple TAs for improved service quality and robustness. Furthermore, certificateless technology alleviates TAs of key management burdens. The security analysis indicates that our solution satisfies the communication security and privacy requirements. Experimental simulations verify that our method achieves optimal overall performance with lower computational costs and smaller communication overhead compared to state-of-the-art solutions.}, } @article {pmid38540411, year = {2024}, author = {Yuan, DY and Park, JH and Li, Z and Thomas, R and Hwang, DM and Fu, L}, title = {A New Cloud-Native Tool for Pharmacogenetic Analysis.}, journal = {Genes}, volume = {15}, number = {3}, pages = {}, pmid = {38540411}, issn = {2073-4425}, support = {LMMD Strategic Innovation Fund//Sunnybrook Health Sciences Centre/ ; }, mesh = {Humans ; *Pharmacogenomic Testing ; *Pharmacogenetics/methods ; High-Throughput Nucleotide Sequencing/methods ; Genomics/methods ; Computational Biology ; }, abstract = {BACKGROUND: The advancement of next-generation sequencing (NGS) technologies provides opportunities for large-scale Pharmacogenetic (PGx) studies and pre-emptive PGx testing to cover a wide range of genotypes present in diverse populations. However, NGS-based PGx testing is limited by the lack of comprehensive computational tools to support genetic data analysis and clinical decisions.
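The RCE-NN accelerated in the NoC paper above has a simple learning rule that explains why the network "generates neurons as needed": a training sample not covered by a correct-class neuron commits a new one, and any wrong-class neuron that fires has its radius shrunk. A compact sketch under assumed initial-radius and tie-handling choices.

```python
import numpy as np

class RCENN:
    def __init__(self, r_init=1.0, r_min=1e-3):
        self.neurons = []                       # (center, radius, label)
        self.r_init, self.r_min = r_init, r_min

    def fit_one(self, x, y):
        covered = False
        for i, (c, r, lab) in enumerate(self.neurons):
            d = float(np.linalg.norm(x - c))
            if d < r:
                if lab == y:
                    covered = True              # an existing neuron explains x
                else:                           # wrong neuron fires: shrink it
                    self.neurons[i] = (c, max(d, self.r_min), lab)
        if not covered:                         # commit a new neuron for x
            self.neurons.append((np.asarray(x, float), self.r_init, y))

    def predict(self, x):
        fired = [lab for c, r, lab in self.neurons if np.linalg.norm(x - c) < r]
        return max(set(fired), key=fired.count) if fired else None

net = RCENN()
for x, y in [([0, 0], "A"), ([1, 1], "B"), ([0.2, 0.1], "A")]:
    net.fit_one(np.array(x, float), y)
print(net.predict(np.array([0.1, 0.0])), len(net.neurons))  # A 2
```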

METHODS: Bioinformatics utilities specialized for human genomics and the latest cloud-based technologies were used to develop a bioinformatics pipeline for analyzing the genomic sequence data and reporting PGx genotypes. A database was created and integrated in the pipeline for filtering the actionable PGx variants and clinical interpretations. Strict quality verification procedures were conducted on variant calls with the whole genome sequencing (WGS) dataset of the 1000 Genomes Project (G1K). The accuracy of PGx allele identification was validated using the WGS dataset of the Pharmacogenetics Reference Materials from the Centers for Disease Control and Prevention (CDC).

RESULTS: The newly created bioinformatics pipeline, Pgxtools, can analyze genomic sequence data, identify actionable variants in 13 PGx-relevant genes, and generate reports annotated with specific interpretations and recommendations based on clinical practice guidelines. Verified with two independent methods, Pgxtools consistently identified variants more accurately than the results in the G1K dataset on both GRCh37 and GRCh38.

CONCLUSIONS: Pgxtools provides an integrated workflow for large-scale genomic data analysis and PGx clinical decision support. Implemented with cloud-native technologies, it is highly portable in a wide variety of environments from a single laptop to High-Performance Computing (HPC) clusters and cloud platforms for different production scales and requirements.}, } @article {pmid38535044, year = {2024}, author = {Kukkar, A and Kumar, Y and Sandhu, JK and Kaur, M and Walia, TS and Amoon, M}, title = {DengueFog: A Fog Computing-Enabled Weighted Random Forest-Based Smart Health Monitoring System for Automatic Dengue Prediction.}, journal = {Diagnostics (Basel, Switzerland)}, volume = {14}, number = {6}, pages = {}, pmid = {38535044}, issn = {2075-4418}, abstract = {Dengue is a distinctive and fatal infectious disease that spreads through female Aedes aegypti mosquitoes. It is a notable concern for developing countries due to its low diagnosis rate. Owing to tremendous platelet depletion, dengue carries a strikingly high mortality level and can be categorized as life-threatening compared with other fevers of its class. Additionally, it has been shown that dengue fever shares many of the same symptoms as other flu-based fevers. On the other hand, the research community is closely monitoring the popular research fields related to IoT, fog, and cloud computing for the diagnosis and prediction of diseases. IoT, fog, and cloud-based technologies are used for constructing a number of health care systems. Accordingly, in this study, a DengueFog monitoring system was created based on fog computing for the prediction and detection of dengue sickness. Additionally, the proposed DengueFog system includes a weighted random forest (WRF) classifier to monitor and predict dengue infection. The proposed system's efficacy was evaluated using data on dengue infection. This dataset was gathered between 2016 and 2018 from several hospitals in the Delhi-NCR region. The accuracy, F-value, recall, precision, error rate, and specificity metrics were used to assess the simulation results of the suggested monitoring system. It was demonstrated that the proposed DengueFog monitoring system with WRF outperforms the traditional classifiers.}, } @article {pmid38531975, year = {2024}, author = {Ali, I and Wassif, K and Bayomi, H}, title = {Dimensionality reduction for images of IoT using machine learning.}, journal = {Scientific reports}, volume = {14}, number = {1}, pages = {7205}, pmid = {38531975}, issn = {2045-2322}, abstract = {Sensors, wearables, mobile devices, and other Internet of Things (IoT) devices are becoming increasingly integrated into all aspects of our lives. They are capable of gathering enormous amounts of data, such as image data, which can then be sent to the cloud for processing. However, this results in an increase in network traffic and latency. To overcome these difficulties, edge computing has been proposed as a paradigm for computing that brings processing closer to the location where data is produced. This paper explores the merging of cloud and edge computing for IoT and investigates approaches using machine learning for dimensionality reduction of images on the edge, employing the autoencoder deep learning-based approach and principal component analysis (PCA). The encoded data is then sent to the cloud server, where it is used directly for any machine learning task without significantly impacting the accuracy of the data processed in the cloud.
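For the PCA variant of the edge-side compression described above, the round trip is: fit PCA, transmit only the low-dimensional codes from the edge, and reconstruct in the cloud. A sketch on synthetic low-rank "images"; the 95% variance target is an assumed setting, not the paper's.

```python
import numpy as np
from sklearn.decomposition import PCA

rng = np.random.default_rng(0)
basis = rng.normal(size=(40, 32 * 32))           # 40 latent patterns
images = rng.normal(size=(500, 40)) @ basis      # correlated, compressible data
images += 0.05 * rng.normal(size=images.shape)   # mild sensor noise

pca = PCA(n_components=0.95)                     # keep 95% of the variance
codes = pca.fit_transform(images)                # edge -> cloud payload
restored = pca.inverse_transform(codes)          # cloud-side reconstruction

saved = 1 - codes.shape[1] / images.shape[1]
print(f"dims {images.shape[1]} -> {codes.shape[1]} ({saved:.0%} smaller), "
      f"MSE {np.mean((images - restored) ** 2):.4f}")
```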
The proposed approach has been evaluated on an object detection task using a set of 4000 images randomly chosen from three datasets: COCO, human detection, and HDA. Results show that a 77% reduction in data did not have a significant impact on the object detection task's accuracy.}, } @article {pmid38531933, year = {2024}, author = {Huettmann, F and Andrews, P and Steiner, M and Das, AK and Philip, J and Mi, C and Bryans, N and Barker, B}, title = {A super SDM (species distribution model) 'in the cloud' for better habitat-association inference with a 'big data' application of the Great Gray Owl for Alaska.}, journal = {Scientific reports}, volume = {14}, number = {1}, pages = {7213}, pmid = {38531933}, issn = {2045-2322}, support = {-EWHALE lab//University of Alaska Fairbanks/ ; -EWHALE lab//University of Alaska Fairbanks/ ; }, abstract = {The currently available distribution and range maps for the Great Grey Owl (GGOW; Strix nebulosa) are ambiguous, contradictory, imprecise, outdated, often hand-drawn, and thus not quantified, not based on data, and not scientific. In this study, we present a proof of concept with a biological application for technical and biological workflow progress on the latest global open-access 'Big Data' sharing, using open-source methods of R and geographic information systems (OGIS and QGIS), assessed with six recent multi-evidence citizen-science sightings of the GGOW. This proposed workflow can be applied for quantified inference for any species-habitat model such as typically applied with species distribution models (SDMs). Using Random Forest (an ensemble-type model of Machine Learning following Leo Breiman's approach of inference from predictions), we present a Super SDM for GGOWs in Alaska running on Oracle Cloud Infrastructure (OCI). These Super SDMs were based on the best publicly available data (410 occurrences + 1% new assessment sightings) and over 100 environmental GIS habitat predictors ('Big Data'). The compiled global open-access data and the associated workflow overcome for the first time the limitations of traditionally used PCs and laptops. It breaks new ground and has real-world implications for conservation and land management for GGOW, for Alaska, and for other species worldwide as a 'new' baseline. As this research field remains dynamic, Super SDMs can have limits, are not the ultimate and final statement on species-habitat associations yet, but they summarize all publicly available data and information on a topic in a quantified and testable fashion, allowing fine-tuning and improvements as needed. At minimum, they allow for low-cost rapid assessment and a great leap forward to be more ecological and inclusive of all information at hand. Using GGOWs, here we aim to correct the perception of this species towards a more inclusive, holistic, and scientifically correct assessment of this urban-adapted owl in the Anthropocene, rather than a mysterious wilderness-inhabiting species (aka 'Phantom of the North').
Such a Super SDM was never created for any bird species before and opens new perspectives for impact assessment policy and global sustainability.}, } @article {pmid38528619, year = {2024}, author = {Budge, J and Carrell, T and Yaqub, M and Wafa, H and Waltham, M and Pilecka, I and Kelly, J and Murphy, C and Palmer, S and Wang, Y and Clough, RE}, title = {The ARIA trial protocol: a randomised controlled trial to assess the clinical, technical, and cost-effectiveness of a cloud-based, ARtificially Intelligent image fusion system in comparison to standard treatment to guide endovascular Aortic aneurysm repair.}, journal = {Trials}, volume = {25}, number = {1}, pages = {214}, pmid = {38528619}, issn = {1745-6215}, support = {NIHR201004//Invention for Innovation Programme/ ; }, mesh = {Humans ; *Aortic Aneurysm, Abdominal/diagnostic imaging/surgery ; Cost-Benefit Analysis ; Cloud Computing ; *Endovascular Procedures/methods ; *Blood Vessel Prosthesis Implantation/adverse effects ; Treatment Outcome ; Retrospective Studies ; Randomized Controlled Trials as Topic ; Multicenter Studies as Topic ; }, abstract = {BACKGROUND: Endovascular repair of aortic aneurysmal disease is established due to perceived advantages in patient survival, reduced postoperative complications, and shorter hospital lengths of stay. High spatial and contrast resolution 3D CT angiography images are used to plan the procedures and inform device selection and manufacture, but in standard care, the surgery is performed using image-guidance from 2D X-ray fluoroscopy with injection of nephrotoxic contrast material to visualise the blood vessels. This study aims to assess the benefit to patients, practitioners, and the health service of a novel image fusion medical device (Cydar EV), which allows this high-resolution 3D information to be available to operators at the time of surgery.
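A species distribution model in the spirit of the Super SDM abstract above reduces, at its core, to presence/pseudo-absence points with environmental predictors fed to a Random Forest, with predicted presence probability read out as habitat suitability. A sketch on synthetic data (the occurrence count echoes the abstract; the two predictors are invented).

```python
import numpy as np
from sklearn.ensemble import RandomForestClassifier

rng = np.random.default_rng(42)
n = 410                                              # occurrences, as above
presence = rng.normal([3.0, 60.0], [1.0, 10.0], size=(n, 2))  # temp, canopy
absence = rng.normal([8.0, 20.0], [2.0, 15.0], size=(n, 2))   # pseudo-absence
X = np.vstack([presence, absence])
y = np.r_[np.ones(n), np.zeros(n)]

sdm = RandomForestClassifier(n_estimators=500, oob_score=True, random_state=0)
sdm.fit(X, y)
print("out-of-bag accuracy:", round(sdm.oob_score_, 3))

grid = np.array([[2.5, 65.0], [9.0, 10.0]])           # two candidate grid cells
print("suitability:", sdm.predict_proba(grid)[:, 1])  # presence probability
```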

METHODS: The trial is a multi-centre, open-label, two-armed randomised controlled clinical trial of 340 patients, randomised 1:1 to either standard treatment in endovascular aneurysm repair or treatment using Cydar EV, a CE-marked medical device comprising cloud computing, augmented intelligence, and computer vision. The primary outcome is procedural time, with secondary outcomes of procedural efficiency, technical effectiveness, patient outcomes, and cost-effectiveness. Patients with a clinical diagnosis of AAA or TAAA suitable for endovascular repair and able to provide written informed consent will be invited to participate.

DISCUSSION: This trial is the first randomised controlled trial evaluating advanced image fusion technology in endovascular aortic surgery and is well placed to evaluate the effect of this technology on patient outcomes and cost to the NHS.

TRIAL REGISTRATION: ISRCTN13832085. Dec. 3, 2021.}, } @article {pmid38528564, year = {2024}, author = {Zhang, S and Li, H and Jing, Q and Shen, W and Luo, W and Dai, R}, title = {Anesthesia decision analysis using a cloud-based big data platform.}, journal = {European journal of medical research}, volume = {29}, number = {1}, pages = {201}, pmid = {38528564}, issn = {2047-783X}, support = {2022JJ70061//Natural Science Foundation of Hunan Province/ ; 22A0011//Key Fund Project of Hunan Provincial Department of Education/ ; W20243113//Health Commission of Hunan Province/ ; 82103641 and 82071347//National Natural Science Foundation of China/ ; }, mesh = {Humans ; Big Data ; *Anesthesiology ; Cloud Computing ; *Anesthesia ; *Anesthetics ; Decision Support Techniques ; }, abstract = {Big data technologies have proliferated since the dawn of the cloud-computing era. Traditional data storage, extraction, transformation, and analysis technologies have thus become unsuitable for the large volume, diversity, high processing speed, and low value density of big data in medical strategies, which require the development of novel big data application technologies. In this regard, we investigated the most recent big data platform breakthroughs in anesthesiology and designed an anesthesia decision model based on a cloud system for storing and analyzing massive amounts of data from anesthetic records. The presented Anesthesia Decision Analysis Platform performs distributed computing on medical records via several programming tools, and provides services such as keyword search, data filtering, and basic statistics to reduce inaccurate and subjective judgments by decision-makers. Importantly, it can potentially improve anesthetic strategies and support individualized anesthesia decisions, lowering the likelihood of perioperative complications.}, } @article {pmid38524844, year = {2024}, author = {Mukuka, A}, title = {Data on mathematics teacher educators' proficiency and willingness to use technology: A structural equation modelling analysis.}, journal = {Data in brief}, volume = {54}, number = {}, pages = {110307}, pmid = {38524844}, issn = {2352-3409}, abstract = {The role of Mathematics Teacher Educators (MTEs) in preparing future teachers to effectively integrate technology into their mathematics instruction is of paramount importance yet remains an underexplored domain. Technology has the potential to enhance the development of 21st-century skills, such as problem-solving and critical thinking, which are essential for students in the era of the fourth industrial revolution. However, the rapid evolution of technology and the emergence of new trends like data analytics, the Internet of Things, machine learning, cloud computing, and artificial intelligence present new challenges in the realm of mathematics teaching and learning. Consequently, MTEs need to equip prospective teachers with the knowledge and skills to harness technology in innovative ways within their future mathematics classrooms. This paper presents and describes data from a survey of 104 MTEs in Zambia. The study focuses on MTEs' proficiency, perceived usefulness, perceived ease of use, and willingness to incorporate technology in their classrooms. This data-driven article aims to unveil patterns and trends within the dataset, with the objective of offering insights rather than drawing definitive conclusions.
The article also highlights the data collection process and outlines the procedure for assessing the measurement model of the hypothesised relationships among variables through structural equation modelling analysis. The data described in this article not only sheds light on the current landscape but also serves as a valuable resource for mathematics teacher training institutions and other stakeholders seeking to understand the requisites for MTEs to foster technological skills among prospective teachers of mathematics.}, } @article {pmid38520921, year = {2024}, author = {Tadi, AA and Alhadidi, D and Rueda, L}, title = {PPPCT: Privacy-Preserving framework for Parallel Clustering Transcriptomics data.}, journal = {Computers in biology and medicine}, volume = {173}, number = {}, pages = {108351}, doi = {10.1016/j.compbiomed.2024.108351}, pmid = {38520921}, issn = {1879-0534}, mesh = {Humans ; *Privacy ; *Software ; Algorithms ; Gene Expression Profiling ; Cluster Analysis ; Sequence Analysis, RNA ; }, abstract = {Single-cell transcriptomics data provides crucial insights into patients' health, yet poses significant privacy concerns. Genomic data privacy attacks can have deep implications, encompassing not only the patients' health information but also extending widely to compromise their families'. Moreover, the permanence of leaked data exacerbates the challenges, making retraction an impossibility. While extensive efforts have been directed towards clustering single-cell transcriptomics data, addressing critical challenges, especially in the realm of privacy, remains pivotal. This paper introduces an efficient, fast, privacy-preserving approach for clustering single-cell RNA-sequencing (scRNA-seq) datasets. The key contributions include ensuring data privacy, achieving high-quality clustering, accommodating the high dimensionality inherent in the datasets, and maintaining reasonable computation time for big-scale datasets. Our proposed approach utilizes the map-reduce scheme to parallelize clustering, addressing intensive calculation challenges. Intel Software Guard eXtension (SGX) processors are used to ensure the security of sensitive code and data during processing. Additionally, the approach incorporates a logarithm transformation as a preprocessing step, employs non-negative matrix factorization for dimensionality reduction, and utilizes parallel k-means for clustering. The approach fully leverages the computing capabilities of all processing resources within a secure private cloud environment. Experimental results demonstrate the efficacy of our approach in preserving patient privacy while surpassing state-of-the-art methods in both clustering quality and computation time. Our method consistently achieves a minimum of 7% higher Adjusted Rand Index (ARI) than existing approaches, contingent on dataset size. Additionally, due to parallel computations and dimensionality reduction, our approach exhibits efficiency, converging to very good results in less than 10 seconds for a scRNA-seq dataset with 5000 genes and 6000 cells when prioritizing privacy and under two seconds without privacy considerations. 
Availability and implementation: Code and datasets are available at https://github.com/University-of-Windsor/PPPCT.}, } @article {pmid38514837, year = {2024}, author = {Hajiaghabozorgi, M and Fischbach, M and Albrecht, M and Wang, W and Myers, CL}, title = {BridGE: a pathway-based analysis tool for detecting genetic interactions from GWAS.}, journal = {Nature protocols}, volume = {19}, number = {5}, pages = {1400-1435}, pmid = {38514837}, issn = {1750-2799}, support = {R01 HG005084/HG/NHGRI NIH HHS/United States ; R21 CA235352/CA/NCI NIH HHS/United States ; BAND-19-615151//Weston Brain Institute/ ; R01 HG005853/HG/NHGRI NIH HHS/United States ; R21CA235352//U.S. Department of Health & Human Services | NIH | Center for Information Technology (Center for Information Technology, National Institutes of Health)/ ; R01HG005084//U.S. Department of Health & Human Services | NIH | Center for Information Technology (Center for Information Technology, National Institutes of Health)/ ; R01HG005853//U.S. Department of Health & Human Services | NIH | Center for Information Technology (Center for Information Technology, National Institutes of Health)/ ; }, mesh = {*Genome-Wide Association Study/methods ; Humans ; *Software ; *Epistasis, Genetic ; Computational Biology/methods ; }, abstract = {Genetic interactions have the potential to modulate phenotypes, including human disease. In principle, genome-wide association studies (GWAS) provide a platform for detecting genetic interactions; however, traditional methods for identifying them, which tend to focus on testing individual variant pairs, lack statistical power. In this protocol, we describe a novel computational approach, called Bridging Gene sets with Epistasis (BridGE), for discovering genetic interactions between biological pathways from GWAS data. We present a Python-based implementation of BridGE along with instructions for its application to a typical human GWAS cohort. The major stages include initial data processing and quality control, construction of a variant-level genetic interaction network, measurement of pathway-level genetic interactions, evaluation of statistical significance using sample permutations and generation of results in a standardized output format. The BridGE software pipeline includes options for running the analysis on multiple cores and multiple nodes for users who have access to computing clusters or a cloud computing environment. In a cluster computing environment with 10 nodes and 100 GB of memory per node, the method can be run in less than 24 h for typical human GWAS cohorts. Using BridGE requires knowledge of running Python programs and basic shell script programming experience.}, } @article {pmid38506901, year = {2024}, author = {Sahu, KS and Dubin, JA and Majowicz, SE and Liu, S and Morita, PP}, title = {Revealing the Mysteries of Population Mobility Amid the COVID-19 Pandemic in Canada: Comparative Analysis With Internet of Things-Based Thermostat Data and Google Mobility Insights.}, journal = {JMIR public health and surveillance}, volume = {10}, number = {}, pages = {e46903}, pmid = {38506901}, issn = {2369-2960}, mesh = {Humans ; Pandemics ; *Internet of Things ; Search Engine ; *COVID-19/epidemiology ; Alberta/epidemiology ; Health Policy ; }, abstract = {BACKGROUND: The COVID-19 pandemic necessitated public health policies to limit human mobility and curb infection spread. Human mobility, which is often underestimated, plays a pivotal role in health outcomes, impacting both infectious and chronic diseases.
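The PPPCT abstract above spells out its analysis chain: log transform, non-negative matrix factorization for dimensionality reduction, then k-means. The sketch below runs that chain single-threaded on a synthetic counts matrix; the SGX protection and map-reduce parallelism that are the paper's actual contributions are omitted.

```python
import numpy as np
from sklearn.decomposition import NMF
from sklearn.cluster import KMeans

rng = np.random.default_rng(1)
counts = rng.poisson(2.0, size=(600, 200))       # cells x genes stand-in

logged = np.log1p(counts)                        # log transform (stays >= 0)
factors = NMF(n_components=20, init="nndsvda",   # reduce 200 genes -> 20 parts
              max_iter=400, random_state=0).fit_transform(logged)
labels = KMeans(n_clusters=6, n_init=10,
                random_state=0).fit_predict(factors)
print(np.bincount(labels))                       # cluster sizes
```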
Collecting precise mobility data is vital for understanding human behavior and informing public health strategies. Google's GPS-based location tracking, which is compiled in Google Mobility Reports, became the gold standard for monitoring outdoor mobility during the pandemic. However, indoor mobility remains underexplored.

OBJECTIVE: This study investigates in-home mobility data from ecobee's smart thermostats in Canada (February 2020 to February 2021) and compares it directly with Google's residential mobility data. By assessing the suitability of smart thermostat data, we aim to shed light on indoor mobility patterns, contributing valuable insights to public health research and strategies.

METHODS: Motion sensor data were acquired from the ecobee "Donate Your Data" initiative via Google's BigQuery cloud platform. Concurrently, residential mobility data were sourced from the Google Mobility Report. This study centered on 4 Canadian provinces (Ontario, Quebec, Alberta, and British Columbia) during the period from February 15, 2020, to February 14, 2021. Data processing, analysis, and visualization were conducted on the Microsoft Azure platform using Python (Python Software Foundation) and R programming languages (R Foundation for Statistical Computing). Our investigation involved assessing changes in mobility relative to the baseline in both data sets, with the strength of this relationship assessed using Pearson and Spearman correlation coefficients. We scrutinized daily, weekly, and monthly variations in mobility patterns across the data sets and performed anomaly detection for further insights.
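The correlation step in these METHODS comes down to aligning the two daily series on their common dates and computing both coefficients. A sketch assuming simple CSV exports with hypothetical column names (ecobee_pct, google_pct), both expressed as percent change from baseline.

```python
import pandas as pd
from scipy.stats import pearsonr, spearmanr

def mobility_agreement(ecobee_csv, google_csv):
    a = pd.read_csv(ecobee_csv, parse_dates=["date"]).set_index("date")
    b = pd.read_csv(google_csv, parse_dates=["date"]).set_index("date")
    m = a.join(b, how="inner").dropna()          # keep shared dates only
    r, p_r = pearsonr(m["ecobee_pct"], m["google_pct"])
    rho, p_s = spearmanr(m["ecobee_pct"], m["google_pct"])
    return {"pearson": (r, p_r), "spearman": (rho, p_s)}
```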

RESULTS: The results revealed noteworthy week-to-week and month-to-month shifts in population mobility within the chosen provinces, aligning with pandemic-driven policy adjustments. Notably, the ecobee data exhibited a robust correlation with Google's data set. Examination of Google's daily patterns detected more pronounced mobility fluctuations during weekdays, a trend not mirrored in the ecobee data. Anomaly detection successfully identified substantial mobility deviations coinciding with policy modifications and cultural events.
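One simple way to reproduce the anomaly detection reported in these RESULTS is a rolling z-score over a mobility series, flagging days that deviate strongly from their recent baseline; the 28-day window and 3-sigma rule below are assumed choices, not the paper's stated method.

```python
import pandas as pd

def flag_anomalies(series: pd.Series, window=28, z_thresh=3.0):
    roll = series.rolling(window, min_periods=window // 2)
    z = (series - roll.mean()) / roll.std()
    return series[z.abs() > z_thresh]            # dates of unusual mobility

# Usage on a daily series indexed by date, e.g. percent change from baseline:
# print(flag_anomalies(daily["ecobee_pct"]))     # stay-at-home orders, holidays
```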

CONCLUSIONS: This study's findings illustrate the substantial influence of the Canadian stay-at-home and work-from-home policies on population mobility. This impact was discernible through both Google's out-of-house residential mobility data and ecobee's in-house smart thermostat data. As such, we deduce that smart thermostats represent a valid tool for facilitating intelligent monitoring of population mobility in response to policy-driven shifts.}, } @article {pmid38495592, year = {2024}, author = {Wang, H and Chen, H and Wang, Y}, title = {Analysis of Hot Topics Regarding Global Smart Elderly Care Research - 1997-2021.}, journal = {China CDC weekly}, volume = {6}, number = {9}, pages = {157-161}, pmid = {38495592}, issn = {2096-7071}, abstract = {With the assistance of the internet, big data, cloud computing, and other technologies, the concept of smart elderly care has emerged.

WHAT IS ADDED BY THIS REPORT?: This study presents information on the countries or regions that have conducted research on smart elderly care, as well as identifies global hotspots and development trends in this field.

The results of this study suggest that future research should focus on fall detection, health monitoring, and guidance systems that are user-friendly and contribute to the creation of smarter, safer communities for the well-being of the elderly.}, } @article {pmid38495055, year = {2024}, author = {Li, J and Xiong, Y and Feng, S and Pan, C and Guo, X}, title = {CloudProteoAnalyzer: scalable processing of big data from proteomics using cloud computing.}, journal = {Bioinformatics advances}, volume = {4}, number = {1}, pages = {vbae024}, pmid = {38495055}, issn = {2635-0041}, support = {R01 AT011618/AT/NCCIH NIH HHS/United States ; R15 LM013460/LM/NLM NIH HHS/United States ; }, abstract = {SUMMARY: Shotgun proteomics is widely used in many systems biology studies to determine the global protein expression profiles of tissues, cultures, and microbiomes. Many non-distributed computer algorithms have been developed for users to process proteomics data on their local computers. However, the amount of data acquired in a typical proteomics study has grown rapidly in recent years, owing to the increasing throughput of mass spectrometry and the expanding scale of study designs. This presents a big data challenge for researchers to process proteomics data in a timely manner. To overcome this challenge, we developed a cloud-based parallel computing application to offer end-to-end proteomics data analysis software as a service (SaaS). A web interface was provided to users to upload mass spectrometry-based proteomics data, configure parameters, submit jobs, and monitor job status. The data processing was distributed across multiple nodes in a supercomputer to achieve scalability for large datasets. Our study demonstrated SaaS for proteomics as a viable solution for the community to scale up the data processing using cloud computing.

This application is available online at https://sipros.oscer.ou.edu/ or https://sipros.unt.edu for free use. The source code is available at https://github.com/Biocomputing-Research-Group/CloudProteoAnalyzer under the GPL version 3.0 license.}, } @article {pmid38491365, year = {2024}, author = {Clements, J and Goina, C and Hubbard, PM and Kawase, T and Olbris, DJ and Otsuna, H and Svirskas, R and Rokicki, K}, title = {NeuronBridge: an intuitive web application for neuronal morphology search across large data sets.}, journal = {BMC bioinformatics}, volume = {25}, number = {1}, pages = {114}, pmid = {38491365}, issn = {1471-2105}, mesh = {Animals ; *Software ; Neurons ; *Connectome ; Microscopy, Electron ; Drosophila ; }, abstract = {BACKGROUND: Neuroscience research in Drosophila is benefiting from large-scale connectomics efforts using electron microscopy (EM) to reveal all the neurons in a brain and their connections. To exploit this knowledge base, researchers relate a connectome's structure to neuronal function, often by studying individual neuron cell types. Vast libraries of fly driver lines expressing fluorescent reporter genes in sets of neurons have been created and imaged using confocal light microscopy (LM), enabling the targeting of neurons for experimentation. However, creating a fly line for driving gene expression within a single neuron found in an EM connectome remains a challenge, as it typically requires identifying a pair of driver lines where only the neuron of interest is expressed in both. This task and other emerging scientific workflows require finding similar neurons across large data sets imaged using different modalities.

RESULTS: Here, we present NeuronBridge, a web application for easily and rapidly finding putative morphological matches between large data sets of neurons imaged using different modalities. We describe the functionality and construction of the NeuronBridge service, including its user-friendly graphical user interface (GUI), extensible data model, serverless cloud architecture, and massively parallel image search engine.

CONCLUSIONS: NeuronBridge fills a critical gap in the Drosophila research workflow and is used by hundreds of neuroscience researchers around the world. We offer our software code, open APIs, and processed data sets for integration and reuse, and provide the application as a service at http://neuronbridge.janelia.org .}, } @article {pmid38475170, year = {2024}, author = {Tripathi, A and Waqas, A and Venkatesan, K and Yilmaz, Y and Rasool, G}, title = {Building Flexible, Scalable, and Machine Learning-Ready Multimodal Oncology Datasets.}, journal = {Sensors (Basel, Switzerland)}, volume = {24}, number = {5}, pages = {}, pmid = {38475170}, issn = {1424-8220}, support = {2234836//National Science Foundation/ ; 2234468//National Science Foundation/ ; 1903466//National Science Foundation/ ; }, mesh = {Humans ; Reproducibility of Results ; *Neoplasms ; }, abstract = {The advancements in data acquisition, storage, and processing techniques have resulted in the rapid growth of heterogeneous medical data. Integrating radiological scans, histopathology images, and molecular information with clinical data is essential for developing a holistic understanding of the disease and optimizing treatment. The need for integrating data from multiple sources is further pronounced in complex diseases such as cancer for enabling precision medicine and personalized treatments. This work proposes Multimodal Integration of Oncology Data System (MINDS)-a flexible, scalable, and cost-effective metadata framework for efficiently fusing disparate data from public sources such as the Cancer Research Data Commons (CRDC) into an interconnected, patient-centric framework. MINDS consolidates over 41,000 cases from across repositories while achieving a high compression ratio relative to the 3.78 PB source data size. It offers sub-5-s query response times for interactive exploration. MINDS offers an interface for exploring relationships across data types and building cohorts for developing large-scale multimodal machine learning models. By harmonizing multimodal data, MINDS aims to potentially empower researchers with greater analytical ability to uncover diagnostic and prognostic insights and enable evidence-based personalized care. MINDS tracks granular end-to-end data provenance, ensuring reproducibility and transparency. The cloud-native architecture of MINDS can handle exponential data growth in a secure, cost-optimized manner while ensuring substantial storage optimization, replication avoidance, and dynamic access capabilities. Auto-scaling, access controls, and other mechanisms guarantee pipelines' scalability and security. MINDS overcomes the limitations of existing biomedical data silos via an interoperable metadata-driven approach that represents a pivotal step toward the future of oncology data integration.}, } @article {pmid38475051, year = {2024}, author = {Gaba, P and Raw, RS and Kaiwartya, O and Aljaidi, M}, title = {B-SAFE: Blockchain-Enabled Security Architecture for Connected Vehicle Fog Environment.}, journal = {Sensors (Basel, Switzerland)}, volume = {24}, number = {5}, pages = {}, pmid = {38475051}, issn = {1424-8220}, support = {00//nottingham trent university/ ; }, abstract = {Vehicles are no longer stand-alone mechanical entities due to the advancements in vehicle-to-vehicle (V2V) and vehicle-to-infrastructure (V2I) communication-centric Internet of Connected Vehicles (IoV) frameworks. 
However, the advancement in connected vehicles leads to another serious security threat, online vehicle hijacking, where the steering control of vehicles can be hacked online. The feasibility of traditional security solutions in IoV environments is very limited, considering the intermittent network connectivity to cloud servers and vehicle-centric computing capability constraints. In this context, this paper presents a Blockchain-enabled Security Architecture for a connected vehicular Fog networking Environment (B-SAFE). Firstly, blockchain security and vehicular fog networking are introduced as preliminaries of the framework. Secondly, a three-layer architecture of B-SAFE is presented, focusing on vehicular communication, blockchain at fog nodes, and the cloud as trust and reward management for vehicles. Thirdly, details of the blockchain implementation at fog nodes are presented, along with a flowchart and algorithm. The performance evaluation of the proposed B-SAFE framework attests to its benefits in terms of trust, reward points, and threshold calculation.}, } @article {pmid38474954, year = {2024}, author = {Vercheval, N and Royen, R and Munteanu, A and Pižurica, A}, title = {PCGen: A Fully Parallelizable Point Cloud Generative Model.}, journal = {Sensors (Basel, Switzerland)}, volume = {24}, number = {5}, pages = {}, pmid = {38474954}, issn = {1424-8220}, support = {174B0911//Flanders AI Research Programme/ ; G094122N//Fonds Wetenschappelijk Onderzoek (FWO) project/ ; }, abstract = {Generative models have the potential to revolutionize 3D extended reality. A primary obstacle is that augmented and virtual reality need real-time computing. Current state-of-the-art point cloud random generation methods are not fast enough for these applications. We introduce a vector-quantized variational autoencoder model (VQVAE) that can synthesize high-quality point clouds in milliseconds. Unlike previous work in VQVAEs, our model offers a compact sample representation suitable for conditional generation and data exploration with potential applications in rapid prototyping. We achieve this result by combining architectural improvements with an innovative approach for probabilistic random generation. First, we rethink current parallel point cloud autoencoder structures, and we propose several solutions to improve robustness, efficiency and reconstruction quality. Notable contributions in the decoder architecture include an innovative computation layer to process the shape semantic information, an attention mechanism that helps the model focus on different areas and a filter to cover possible sampling errors. Secondly, we introduce a parallel sampling strategy for VQVAE models consisting of a double encoding system, where a variational autoencoder learns how to generate the complex discrete distribution of the VQVAE, not only allowing quick inference but also describing the shape with a few global variables.
We compare the proposed decoder and our VQVAE model with established and concurrent work, and we demonstrate, one by one, the validity of each individual contribution.}, } @article {pmid38474952, year = {2024}, author = {AlSaleh, I and Al-Samawi, A and Nissirat, L}, title = {Novel Machine Learning Approach for DDoS Cloud Detection: Bayesian-Based CNN and Data Fusion Enhancements.}, journal = {Sensors (Basel, Switzerland)}, volume = {24}, number = {5}, pages = {}, pmid = {38474952}, issn = {1424-8220}, support = {GRANT5,340//King Faisal University/ ; }, abstract = {Cloud computing has revolutionized the information technology landscape, offering businesses the flexibility to adapt to diverse business models without the need for costly on-site servers and network infrastructure. A recent survey reveals that 95% of enterprises have already embraced cloud technology, with 79% of their workloads migrating to cloud environments. However, the deployment of cloud technology introduces significant cybersecurity risks, including network security vulnerabilities, data access control challenges, and the ever-looming threat of cyber-attacks such as Distributed Denial of Service (DDoS) attacks, which pose substantial risks to both cloud and network security. While Intrusion Detection Systems (IDS) have traditionally been employed for DDoS attack detection, prior studies have been constrained by various limitations. In response to these challenges, we present an innovative machine learning approach for DDoS cloud detection, known as the Bayesian-based Convolutional Neural Network (BaysCNN) model. Leveraging the CICDDoS2019 dataset, which encompasses 88 features, we employ Principal Component Analysis (PCA) for dimensionality reduction. Our BaysCNN model comprises 19 layers of analysis, forming the basis for training and validation. Our experimental findings conclusively demonstrate that the BaysCNN model significantly enhances the accuracy of DDoS cloud detection, achieving an impressive average accuracy rate of 99.66% across 13 multi-class attacks. To further elevate the model's performance, we introduce the Data Fusion BaysFusCNN approach, encompassing 27 layers. By leveraging Bayesian methods to estimate uncertainties and integrating features from multiple sources, this approach attains an even higher average accuracy of 99.79% across the same 13 multi-class attacks. Our proposed methodology not only offers valuable insights for the development of robust machine learning-based intrusion detection systems but also enhances the reliability and scalability of IDS in cloud computing environments. This empowers organizations to proactively mitigate security risks and fortify their defenses against malicious cyber-attacks.}, } @article {pmid38469580, year = {2024}, author = {Yakubu, B and Appiah, EM and Adu, AF}, title = {Pangenome Analysis of Helicobacter pylori Isolates from Selected Areas of Africa Indicated Diverse Antibiotic Resistance and Virulence Genes.}, journal = {International journal of genomics}, volume = {2024}, number = {}, pages = {5536117}, pmid = {38469580}, issn = {2314-4378}, abstract = {The challenge facing Helicobacter pylori (H. pylori) infection management in some parts of Africa is the evolution of drug-resistant species, the lack of a gold standard in diagnostic methods, and the ineffectiveness of current vaccines against the bacteria. It is being established that even though clinical consequences linked to the bacteria vary geographically, there is rather a generic approach to treatment.
This situation has remained problematic in the successful fight against the bacteria in parts of Africa. As a result, this study compared the genomes of selected H. pylori isolates from selected areas of Africa and evaluated their virulence and antibiotic drug resistance, distinguishing those that are highly pathogenic and associated with specific clinical outcomes from those that are less virulent and rarely associated with clinical outcomes. 146 genomes of H. pylori isolated from selected locations of Africa were sampled, and bioinformatic tools such as Abricate, CARD RGI, MLST, Prokka, Roary, Phandango, Google Sheets, and iTOLS were used to compare the isolates and their antibiotic resistance or susceptibility. Over 20,000 virulence and AMR genes were observed. About 95% of the isolates were genetically diverse, 90% of the isolates harbored shell genes, and 50% harbored cloud and core genes. Some isolates did not retain the cagA and vacA genes. Clarithromycin, metronidazole, amoxicillin, and tinidazole were resistant to most AMR genes (vacA, cagA, oip, and bab). Conclusion. This study found both virulence and AMR genes in all H. pylori strains across all the selected geographies in Africa, in differing quantities. MLST, Pangenome, and ORF analyses showed disparities among the isolates. In general, this could imply diversities in terms of genetics, evolution, and protein production. Therefore, generic administration of antibiotics such as clarithromycin, amoxicillin, and erythromycin as treatment methods in the African subregion could be contributing to the spread of the bacterium's antibiotic resistance.}, } @article {pmid38468957, year = {2024}, author = {Tripathy, SS and Bebortta, S and Chowdhary, CL and Mukherjee, T and Kim, S and Shafi, J and Ijaz, MF}, title = {FedHealthFog: A federated learning-enabled approach towards healthcare analytics over fog computing platform.}, journal = {Heliyon}, volume = {10}, number = {5}, pages = {e26416}, pmid = {38468957}, issn = {2405-8440}, abstract = {The emergence of the federated learning (FL) technique in fog-enabled healthcare systems has leveraged enhanced privacy towards safeguarding sensitive patient information over heterogeneous computing platforms. In this paper, we introduce the FedHealthFog framework, which was meticulously developed to overcome the difficulties of distributed learning in resource-constrained IoT-enabled healthcare systems, particularly those sensitive to delays and energy efficiency. Conventional federated learning approaches face challenges stemming from substantial compute requirements and significant communication costs. This is primarily due to their reliance on a singular server for the aggregation of global data, which results in inefficient training models. We present a transformational approach to address these problems by elevating strategically placed fog nodes to the position of local aggregators within the federated learning architecture. A sophisticated greedy heuristic technique is used to optimize the choice of a fog node as the global aggregator in each communication cycle between edge devices and the cloud. The FedHealthFog system notably achieves reductions in communication latency of 87.01%, 26.90%, and 71.74%, and in energy consumption of 57.98%, 34.36%, and 35.37%, respectively, for the three benchmark algorithms analyzed in this study.
The effectiveness of FedHealthFog is strongly supported by the outcomes of our experiments in comparison with cutting-edge alternatives, while simultaneously reducing the number of global aggregation cycles. These findings highlight FedHealthFog's potential to transform federated learning in resource-constrained IoT environments for delay-sensitive applications.}, } @article {pmid38466691, year = {2024}, author = {Shafi, I and Din, S and Farooq, S and Díez, IT and Breñosa, J and Espinosa, JCM and Ashraf, I}, title = {Design and development of patient health tracking, monitoring and big data storage using Internet of Things and real time cloud computing.}, journal = {PloS one}, volume = {19}, number = {3}, pages = {e0298582}, pmid = {38466691}, issn = {1932-6203}, mesh = {Humans ; *Cloud Computing ; *Internet of Things ; Pandemics ; Monitoring, Physiologic ; Information Storage and Retrieval ; }, abstract = {With the outbreak of the COVID-19 pandemic, social isolation and quarantine have become commonplace across the world. IoT health monitoring solutions eliminate the need for regular doctor visits and interactions among patients and medical personnel. Many patients in wards or intensive care units require continuous monitoring of their health. Continuous patient monitoring is a hectic practice in hospitals with limited staff; in a pandemic situation like COVID-19, it becomes much more difficult when hospitals are working at full capacity and there is still a risk of medical workers being infected. In this study, we propose an Internet of Things (IoT)-based patient health monitoring system that collects real-time data on important health indicators such as pulse rate, blood oxygen saturation, and body temperature but can be expanded to include more parameters. Our system is comprised of a hardware component that collects and transmits data from sensors to a cloud-based storage system, where it can be accessed and analyzed by healthcare specialists. The ESP-32 microcontroller interfaces with the multiple sensors and wirelessly transmits the collected data to the cloud storage system. Our system utilizes a pulse oximeter to measure blood oxygen saturation and body temperature, and a heart rate monitor to measure pulse rate. A web-based interface is also implemented, allowing healthcare practitioners to access and visualize the collected data in real time, making remote patient monitoring easier. Overall, our IoT-based patient health monitoring system represents a significant advancement in remote patient monitoring, allowing healthcare practitioners to access real-time data on important health metrics and detect potential health issues before they escalate.}, } @article {pmid38460568, year = {2024}, author = {Ghiandoni, GM and Evertsson, E and Riley, DJ and Tyrchan, C and Rathi, PC}, title = {Augmenting DMTA using predictive AI modelling at AstraZeneca.}, journal = {Drug discovery today}, volume = {29}, number = {4}, pages = {103945}, doi = {10.1016/j.drudis.2024.103945}, pmid = {38460568}, issn = {1878-5832}, mesh = {*Artificial Intelligence ; *Biological Assay ; Drug Discovery ; }, abstract = {Design-Make-Test-Analyse (DMTA) is the discovery cycle through which molecules are designed, synthesised, and assayed to produce data that in turn are analysed to inform the next iteration. The process is repeated until viable drug candidates are identified, often requiring many cycles before reaching a sweet spot.
The advent of artificial intelligence (AI) and cloud computing presents an opportunity to innovate drug discovery and reduce the number of cycles needed to yield a candidate. Here, we present the Predictive Insight Platform (PIP), a cloud-native modelling platform developed at AstraZeneca. The impact of PIP on each step of DMTA, as well as its architecture, integration, and usage, is discussed and used to provide insights into the future of drug discovery.}, } @article {pmid38455562, year = {2024}, author = {Gokool, S and Mahomed, M and Brewer, K and Naiken, V and Clulow, A and Sibanda, M and Mabhaudhi, T}, title = {Crop mapping in smallholder farms using unmanned aerial vehicle imagery and geospatial cloud computing infrastructure.}, journal = {Heliyon}, volume = {10}, number = {5}, pages = {e26913}, pmid = {38455562}, issn = {2405-8440}, abstract = {Smallholder farms are major contributors to agricultural production, food security, and socio-economic growth in many developing countries. However, they generally lack the resources to fully maximize their potential. Subsequently they require innovative, evidence-based and lower-cost solutions to optimize their productivity. Recently, precision agricultural practices facilitated by unmanned aerial vehicles (UAVs) have gained traction in the agricultural sector and have great potential for smallholder farm applications. Furthermore, advances in geospatial cloud computing have opened new and exciting possibilities in the remote sensing arena. In light of these recent developments, the focus of this study was to explore and demonstrate the utility of using the advanced image processing capabilities of the Google Earth Engine (GEE) geospatial cloud computing platform to process and analyse a very high spatial resolution multispectral UAV image for mapping land use land cover (LULC) within smallholder farms. The results showed that LULC could be mapped at a 0.50 m spatial resolution with an overall accuracy of 91%. Overall, we found GEE to be an extremely useful platform for conducting advanced image analysis on UAV imagery and rapid communication of results. Notwithstanding the limitations of the study, the findings presented herein are quite promising and clearly demonstrate how modern agricultural practices can be implemented to facilitate improved agricultural management among smallholder farmers.}, } @article {pmid38453988, year = {2024}, author = {Inam, S and Kanwal, S and Firdous, R and Hajjej, F}, title = {Blockchain based medical image encryption using Arnold's cat map in a cloud environment.}, journal = {Scientific reports}, volume = {14}, number = {1}, pages = {5678}, pmid = {38453988}, issn = {2045-2322}, abstract = {Improved software for processing medical images has inspired tremendous interest in modern medicine in recent years. Modern healthcare equipment generates huge amounts of data, such as scanned medical images and computerized patient information, which must be secured for future use. Diversity in the healthcare industry, namely in the form of medical data, is one of the largest challenges for researchers. Cloud environments and blockchain technology have both demonstrated their own usefulness. The purpose of this study is to combine both technologies for safe and secure transactions. Storing or sending medical data through public clouds exposes information to potential eavesdropping, data breaches and unauthorized access. Encrypting data before transmission is crucial to mitigate these security risks.
As a result, a Blockchain-based Chaotic Arnold's Cat Map Encryption Scheme (BCAES) is proposed in this paper. The BCAES first encrypts the image using the Arnold's cat map encryption scheme, then sends the encrypted image to the cloud server and stores a signed document of the plain image on the blockchain. As the blockchain is often considered more secure due to its distributed nature and consensus mechanism, the data receiver can verify the integrity and authenticity of the image after decryption using the signed document stored on the blockchain. Various analysis techniques have been used to examine the proposed scheme. The results of analyses such as key sensitivity analysis, key space analysis, information entropy, histogram correlation of adjacent pixels, Number of Pixel Change Rate, Peak Signal-to-Noise Ratio, and Unified Average Changing Intensity, together with similarity analyses such as Mean Square Error and Structural Similarity Index Measure, illustrate that our proposed scheme is an efficient encryption scheme compared to some recent literature. Our current achievements surpass all previous endeavors, setting a new standard of excellence.}, } @article {pmid38452470, year = {2024}, author = {Zhong, C and Darbandi, M and Nassr, M and Latifian, A and Hosseinzadeh, M and Jafari Navimipour, N}, title = {A new cloud-based method for composition of healthcare services using deep reinforcement learning and Kalman filtering.}, journal = {Computers in biology and medicine}, volume = {172}, number = {}, pages = {108152}, doi = {10.1016/j.compbiomed.2024.108152}, pmid = {38452470}, issn = {1879-0534}, mesh = {Humans ; *Cloud Computing ; Reproducibility of Results ; *Delivery of Health Care ; }, abstract = {Healthcare has significantly contributed to the well-being of individuals around the globe; nevertheless, further benefits could be derived from a more streamlined healthcare system without incurring additional costs. Recently, the main attributes of cloud computing, such as on-demand service, high scalability, and virtualization, have brought many benefits across many areas, especially in medical services. It is considered an important element in healthcare services, enhancing the performance and efficacy of the services. The current state of the healthcare industry requires the supply of healthcare products and services, increasing its viability for everyone involved. Developing new approaches for discovering and selecting healthcare services in the cloud has become more critical due to the rising popularity of these kinds of services. As a result of the diverse array of healthcare services, service composition enables the execution of intricate operations by integrating multiple services' functionalities into a single procedure. However, many methods in this field encounter several issues, such as high energy consumption, cost, and response time. This article introduces a novel layered method for selecting and evaluating healthcare services to find optimal service selection and composition solutions based on Deep Reinforcement Learning (Deep RL), Kalman filtering, and repeated training, addressing the aforementioned issues.
The results revealed that the proposed method achieves acceptable performance in terms of availability, reliability, energy consumption, and response time when compared to other methods.}, } @article {pmid38449567, year = {2024}, author = {Wang, J and Yin, J and Nguyen, MH and Wang, J and Xu, W}, title = {Editorial: Big scientific data analytics on HPC and cloud.}, journal = {Frontiers in big data}, volume = {7}, number = {}, pages = {1353988}, doi = {10.3389/fdata.2024.1353988}, pmid = {38449567}, issn = {2624-909X}, } @article {pmid38449564, year = {2024}, author = {Saad, M and Enam, RN and Qureshi, R}, title = {Optimizing multi-objective task scheduling in fog computing with GA-PSO algorithm for big data application.}, journal = {Frontiers in big data}, volume = {7}, number = {}, pages = {1358486}, pmid = {38449564}, issn = {2624-909X}, abstract = {As the volume and velocity of Big Data continue to grow, traditional cloud computing approaches struggle to meet the demands of real-time processing and low latency. Fog computing, with its distributed network of edge devices, emerges as a compelling solution. However, efficient task scheduling in fog computing remains a challenge due to its inherently multi-objective nature, balancing factors like execution time, response time, and resource utilization. This paper proposes a hybrid Genetic Algorithm (GA)-Particle Swarm Optimization (PSO) algorithm to optimize multi-objective task scheduling in fog computing environments. The hybrid approach combines the strengths of GA and PSO, achieving effective exploration and exploitation of the search space, leading to improved performance compared to traditional single-algorithm approaches. With varying task inputs, the proposed hybrid algorithm improved execution time by 85.68% compared with the GA algorithm, 84% compared with Hybrid PWOA, and 51.03% compared with the PSO algorithm; it improved response time by 67.28%, 54.24%, and 75.40%, respectively; and it improved completion time by 68.69%, 98.91%, and 75.90%, respectively. With varying numbers of fog nodes, it improved execution time by 84.87%, 88.64%, and 85.07%, response time by 65.92%, 80.51%, and 85.26%, and completion time by 67.60%, 81.34%, and 85.23%, again compared with the GA, Hybrid PWOA, and PSO algorithms, respectively.}, } @article {pmid38435622, year = {2024}, author = {Mehmood, T and Latif, S and Jamail, NSM and Malik, A and Latif, R}, title = {LSTMDD: an optimized LSTM-based drift detector for concept drift in dynamic cloud computing.}, journal = {PeerJ. Computer science}, volume = {10}, number = {}, pages = {e1827}, pmid = {38435622}, issn = {2376-5992}, abstract = {This study aims to investigate the problem of concept drift in cloud computing and emphasizes the importance of early detection for enabling optimum resource utilization and offering an effective solution.
The analysis includes synthetic and real-world cloud datasets, stressing the need for appropriate drift detectors tailored to the cloud domain. A modified version of Long Short-Term Memory (LSTM) called the LSTM Drift Detector (LSTMDD) is proposed and compared with other top drift detection techniques using prediction error as the primary evaluation metric. LSTMDD is optimized to improve performance in detecting anomalies in non-Gaussian distributed cloud environments. The experiments show that LSTMDD outperforms other methods for gradual and sudden drift in the cloud domain. The findings suggest that machine learning techniques such as LSTMDD could be a promising approach to addressing the problem of concept drift in cloud computing, leading to more efficient resource allocation and improved performance.}, } @article {pmid38429324, year = {2024}, author = {Yin, X and Fang, W and Liu, Z and Liu, D}, title = {A novel multi-scale CNN and Bi-LSTM arbitration dense network model for low-rate DDoS attack detection.}, journal = {Scientific reports}, volume = {14}, number = {1}, pages = {5111}, pmid = {38429324}, issn = {2045-2322}, support = {2021GX056//the Key Technologies R\&D Program of Weifang/ ; 2023GX063//the Key Technologies R\&D Program of Weifang/ ; KJRC2021002//the Foundation for the Talents by the Weifang University of Science and Technology/ ; ZR2021MF086//the Natural Science Foundation of Shandong Province/ ; 2019GNC106034//the Key R\&D Program of Shandong Province under Grant/ ; }, abstract = {Low-rate distributed denial of service attacks, also known as LDDoS attacks, pose notorious security risks in cloud computing networks. They overload cloud servers and degrade network service quality with a stealthy strategy. Furthermore, this kind of small-ratio, pulse-like abnormal traffic leads to a serious data scale problem. As a result, the existing models for detecting minority and adversary LDDoS attacks are insufficient in both detection accuracy and time consumption. This paper proposes a novel multi-scale Convolutional Neural Network (CNN) and bidirectional Long Short-Term Memory (bi-LSTM) arbitration dense network model (called MSCBL-ADN) for learning and detecting LDDoS attack behaviors under conditions of limited data and time. The MSCBL-ADN incorporates CNN for preliminary spatial feature extraction and embedding-based bi-LSTM for temporal relationship extraction. It then employs an arbitration network to re-weight feature importance for higher accuracy. Finally, it uses a 2-block dense connection network to perform the final classification.
Experimental results on the popular ISCX-2016-SlowDos dataset demonstrate that the proposed MSCBL-ADN model achieves significant improvements in detection accuracy and time performance over state-of-the-art models.}, } @article {pmid38421498, year = {2024}, author = {Mahato, T and Parida, BR and Bar, S}, title = {Assessing tea plantations biophysical and biochemical characteristics in Northeast India using satellite data.}, journal = {Environmental monitoring and assessment}, volume = {196}, number = {3}, pages = {327}, pmid = {38421498}, issn = {1573-2959}, support = {F.4-5(209-FRP)/2015/BSR//University Grants Commission/ ; }, mesh = {*Environmental Monitoring ; *Camellia sinensis ; India ; Nitrogen ; Tea ; }, abstract = {Despite advancements in using multi-temporal satellite data to assess long-term changes in Northeast India's tea plantations, a research gap exists in understanding the intricate interplay between biophysical and biochemical characteristics. Further exploration is crucial for precise, sustainable monitoring and management. In this study, satellite-derived vegetation indices and near-proximal sensor data were deployed to deduce various physico-chemical characteristics and to evaluate the health conditions of tea plantations in northeast India. The districts of Sonitpur, Jorhat, Sibsagar, Dibrugarh, and Tinsukia in Assam, which are major contributors to the tea industry in India, were selected. The Sentinel-2A (2022) data were processed in the Google Earth Engine (GEE) cloud platform and utilized for analyzing the tea plantations' biochemical and biophysical properties. Leaf chlorophyll (Cab) and nitrogen contents are determined using the Normalized Area Over Reflectance Curve (NAOC) index and flavanol contents, respectively. Biophysical and biochemical parameters of the tea assessed during the spring season (March-April) 2022 revealed that tea plantations located in Tinsukia and Dibrugarh were much healthier than those in the other districts of Assam, which is evident from the satellite-derived Enhanced Vegetation Index (EVI), Modified Soil Adjusted Vegetation Index (MSAVI), Leaf Area Index (LAI), and Fraction of Absorbed Photosynthetically Active Radiation (fPAR), including the Cab and nitrogen contents. The Cab of healthy tea plants varied from 25 to 35 µg/cm[2]. Pearson correlation between satellite-derived Cab and nitrogen and field measurements showed an R[2] of 0.61-0.62 (p-value < 0.001). This study offered vital information about land alterations and tea health conditions, which can be crucial for conservation, monitoring, and management practices.}, } @article {pmid38420486, year = {2024}, author = {Liu, X and Wider, W and Fauzi, MA and Jiang, L and Udang, LN and Hossain, SFA}, title = {The evolution of smart hotels: A bibliometric review of the past, present and future trends.}, journal = {Heliyon}, volume = {10}, number = {4}, pages = {e26472}, pmid = {38420486}, issn = {2405-8440}, abstract = {This study provides a bibliometric analysis of smart hotel research, drawing from 613 publications in the Web of Science (WoS) database to examine scholarly trends and developments in this dynamic field. Smart hotels, characterized by integrating advanced technologies such as AI, IoT, cloud computing, and big data, aim to redefine customer experiences and operational efficiency. Utilizing co-citation and co-word analysis techniques, the research delves into the depth of literature from past to future trends.
In the co-citation analysis, clusters including "Sustainable Hotel and Green Hotel", "Theories Integration in Smart Hotel Research", and "Consumers' Decisions about Green Hotels" underscore the pivotal areas of past and current research. Co-word analysis further reveals emergent trend clusters: "The New Era of Sustainable Tourism", "Elevating Standards and Guest Loyalty", and "Hotels' New Sustainable Blueprint in Modern Travel". These clusters reflect the industry's evolving focus on sustainability and technology-enhanced guest experiences. Theoretically, this research bridges gaps in smart hotel literature, proposing new frameworks for understanding customer decisions amid technological advancements and environmental responsibilities. Practically, it offers valuable insights for hotel managers, guiding technology integration strategies for enhanced efficiency and customer loyalty while underscoring the critical role of green strategies and sustainability.}, } @article {pmid38420393, year = {2024}, author = {Mukred, M and Mokhtar, UA and Hawash, B and AlSalman, H and Zohaib, M}, title = {The adoption and use of learning analytics tools to improve decision making in higher learning institutions: An extension of technology acceptance model.}, journal = {Heliyon}, volume = {10}, number = {4}, pages = {e26315}, pmid = {38420393}, issn = {2405-8440}, abstract = {Learning Analytics Tools (LATs) can be used for informed decision-making regarding teaching strategies and their continuous enhancement. LATs must therefore be adopted in higher learning institutions, but several factors hinder their implementation, primarily the lack of an implementation model. In this study, the focus is directed towards examining LATs adoption in Higher Learning Institutions (HLIs), with emphasis on the determinants of the adoption process. The study mainly aims to design a model of LAT adoption and use it in the above context to improve the institutions' decision-making; accordingly, the study adopted an extended version of the Technology Acceptance Model (TAM) as the underpinning theory. Five experts validated the employed survey instrument, and 500 questionnaire copies were distributed through e-mails, from which 275 copies were retrieved from Saudi employees working at public HLIs. The gathered data were subjected to Partial Least Squares-Structural Equation Modeling (PLS-SEM) for analysis and to test the proposed conceptual model. Based on the findings, the perceived usefulness of LATs plays a significant role as a determinant of their adoption. Other variables include top management support, financial support, and the government's role in LATs acceptance and adoption among HLIs. The findings also supported the contribution of LAT adoption and acceptance towards making informed decisions and highlighted the need for big data facilities and cloud computing capability for LATs' usefulness. The findings have significant implications for LATs implementation success among HLIs, providing clear insights into the factors that can enhance their adoption and acceptance. They also lay the basis for future studies in the area to further validate the effect of LATs on decision-making among HLIs.
Furthermore, the obtained findings are expected to serve as practical guidance for policymakers and educational leaders in their efforts to implement LATs using a multi-layered method that considers other aspects in addition to the perceptions of the individual user.}, } @article {pmid38409183, year = {2024}, author = {Grossman, RL and Boyles, RR and Davis-Dusenbery, BN and Haddock, A and Heath, AP and O'Connor, BD and Resnick, AC and Taylor, DM and Ahalt, S}, title = {A Framework for the Interoperability of Cloud Platforms: Towards FAIR Data in SAFE Environments.}, journal = {Scientific data}, volume = {11}, number = {1}, pages = {241}, pmid = {38409183}, issn = {2052-4463}, support = {HHSN261201400008C/CA/NCI NIH HHS/United States ; }, mesh = {*Cloud Computing ; *Electronic Health Records ; }, abstract = {As the number of cloud platforms supporting scientific research grows, there is an increasing need to support interoperability between two or more cloud platforms. A well-accepted core concept is to make data in cloud platforms Findable, Accessible, Interoperable and Reusable (FAIR). We introduce a companion concept that applies to cloud-based computing environments that we call a Secure and Authorized FAIR Environment (SAFE). SAFE environments require data and platform governance structures and are designed to support the interoperability of sensitive or controlled-access data, such as biomedical data. A SAFE environment is a cloud platform that has been approved through a defined data and platform governance process as authorized to hold data from another cloud platform and exposes appropriate APIs for the two platforms to interoperate.}, } @article {pmid38404043, year = {2025}, author = {Rusinovich, Y and Rusinovich, V and Buhayenka, A and Liashko, V and Sabanov, A and Holstein, DJF and Aldmour, S and Doss, M and Branzan, D}, title = {Classification of anatomic patterns of peripheral artery disease with automated machine learning (AutoML).}, journal = {Vascular}, volume = {33}, number = {1}, pages = {26-33}, doi = {10.1177/17085381241236571}, pmid = {38404043}, issn = {1708-539X}, mesh = {Humans ; *Peripheral Arterial Disease/diagnostic imaging/classification ; *Machine Learning ; Predictive Value of Tests ; Reproducibility of Results ; *Femoral Artery/diagnostic imaging ; *Popliteal Artery/diagnostic imaging ; Severity of Illness Index ; *Radiographic Image Interpretation, Computer-Assisted ; *Angiography ; *Decision Support Techniques ; Automation ; }, abstract = {AIM: The aim of this study was to investigate the potential of novel automated machine learning (AutoML) in vascular medicine by developing a discriminative artificial intelligence (AI) model for the classification of anatomical patterns of peripheral artery disease (PAD).

MATERIAL AND METHODS: Random open-source angiograms of lower limbs were collected using a web-indexed search. An experienced researcher in vascular medicine labelled the angiograms according to the most applicable grade of femoropopliteal disease in the Global Limb Anatomic Staging System (GLASS). An AutoML model was trained using the Vertex AI (Google Cloud) platform to classify the angiograms according to the GLASS grade with a multi-label algorithm. Following deployment, we conducted a test using 25 random angiograms (five from each GLASS grade). After the initial evaluation, the model was tuned through incremental training, introducing new angiograms up to the limit of the allocated quota, to determine the effect on the software's performance.
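
As an illustration of the training setup described (not the authors' code), a multi-label AutoML image-classification job on Vertex AI is typically created with the google-cloud-aiplatform SDK roughly as follows; the project, bucket, and display names here are hypothetical:

```python
from google.cloud import aiplatform

aiplatform.init(project="my-project", location="us-central1")

# Hypothetical import file mapping each angiogram's GCS URI to its GLASS label(s).
dataset = aiplatform.ImageDataset.create(
    display_name="glass-angiograms",
    gcs_source="gs://my-bucket/glass_labels.csv",
    import_schema_uri=aiplatform.schema.dataset.ioformat.image.multi_label_classification,
)

job = aiplatform.AutoMLImageTrainingJob(
    display_name="glass-automl",
    prediction_type="classification",
    multi_label=True,  # the study used a multi-label algorithm
)

model = job.run(
    dataset=dataset,
    model_display_name="glass-classifier",
    budget_milli_node_hours=8000,  # training budget: 8 node-hours
)

# Deploy to an endpoint for online prediction on new angiograms.
endpoint = model.deploy(machine_type="n1-standard-4")
```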

RESULTS: We collected 323 angiograms to create the AutoML model. Among these, 80 angiograms were labelled as grade 0 of femoropopliteal disease in GLASS, 114 as grade 1, 34 as grade 2, 25 as grade 3, and 70 as grade 4. After 4.5 h of training, the AI model was deployed. The AI self-assessed average precision was 0.77 (0 is minimal and 1 is maximal). During the testing phase, the AI model assigned a GLASS grade in 100% of the cases. Agreement with the researcher was almost perfect: 22 observed agreements (88%), Kappa = 0.85 (95% CI 0.69-1.0). The best results were achieved in predicting GLASS grade 0 and grade 4 (initial precision: 0.76 and 0.84). However, the AI model exhibited poorer results in classifying GLASS grade 3 (initial precision: 0.2) compared to other grades. Disagreements between the AI and the researcher were associated with the low resolution of the test images. Incremental training expanded the initial dataset by 23% to a total of 417 images, which improved the model's average precision by 11% to 0.86.

CONCLUSION: After a brief training period with a limited dataset, AutoML has demonstrated its potential in identifying and classifying the anatomical patterns of PAD, operating unhindered by the factors that can affect human analysts, such as fatigue or lack of experience. This technology bears the potential to revolutionize outcome prediction and standardize evidence-based revascularization strategies for patients with PAD, leveraging its adaptability and ability to continuously improve with additional data. The pursuit of further research in AutoML within the field of vascular medicine is both promising and warranted. However, it necessitates additional financial support to realize its full potential.}, } @article {pmid38403304, year = {2024}, author = {Wu, ZF and Yang, SJ and Yang, YQ and Wang, ZQ and Ai, L and Zhu, GH and Zhu, WF}, title = {[Current situation and development trend of digital traditional Chinese medicine pharmacy].}, journal = {Zhongguo Zhong yao za zhi = Zhongguo zhongyao zazhi = China journal of Chinese materia medica}, volume = {49}, number = {2}, pages = {285-293}, doi = {10.19540/j.cnki.cjcmm.20230904.301}, pmid = {38403304}, issn = {1001-5302}, mesh = {Humans ; Medicine, Chinese Traditional ; Artificial Intelligence ; Technology, Pharmaceutical ; Drug Industry ; *Pharmacy ; *Drugs, Chinese Herbal ; }, abstract = {The 21st century is a highly information-driven era, and traditional Chinese medicine (TCM) pharmacy is also moving towards digitization and informatization. New technologies such as artificial intelligence and big data with information technology as the core are being integrated into various aspects of drug research, manufacturing, evaluation, and application, promoting interaction between these stages and improving the quality and efficiency of TCM preparations. This, in turn, provides better healthcare services to the general population. The deep integration of emerging technologies such as artificial intelligence, big data, and cloud computing with the TCM pharmaceutical industry will innovate TCM pharmaceutical technology, accelerate the research and industrialization process of TCM pharmacy, provide cutting-edge technological support to the global scientific community, boost the efficiency of the TCM industry, and promote economic and social development. Drawing from recent developments in TCM pharmacy in China, this paper discusses the current research status and future trends in digital TCM pharmacy, aiming to provide a reference for future research in this field.}, } @article {pmid38400504, year = {2024}, author = {Alasmary, H}, title = {ScalableDigitalHealth (SDH): An IoT-Based Scalable Framework for Remote Patient Monitoring.}, journal = {Sensors (Basel, Switzerland)}, volume = {24}, number = {4}, pages = {}, pmid = {38400504}, issn = {1424-8220}, support = {The authors extend their appreciation to the Deanship of Scientific Research at King Khalid University for funding this work through large group Research Project under grant number RGP2/312/44//King Khalid University/ ; }, mesh = {Aged ; Humans ; *Awareness ; *Benchmarking ; Blood Pressure ; Body Temperature ; Monitoring, Physiologic ; }, abstract = {Addressing the increasing demand for remote patient monitoring, especially among the elderly and mobility-impaired, this study proposes the "ScalableDigitalHealth" (SDH) framework. The framework integrates smart digital health solutions with latency-aware edge computing autoscaling, providing a novel approach to remote patient monitoring.
By leveraging IoT technology and application autoscaling, SDH enables the real-time tracking of critical health parameters, such as ECG, body temperature, blood pressure, and oxygen saturation. These vital metrics are efficiently transmitted in real time to AWS cloud storage through a layered networking architecture. The contributions are two-fold: (1) establishing real-time remote patient monitoring and (2) developing a scalable architecture that features latency-aware horizontal pod autoscaling for containerized healthcare applications. The design incorporates a scalable IoT-based architecture and an innovative microservice autoscaling strategy in edge computing, driven by dynamic latency thresholds and enhanced by the integration of custom metrics. This work ensures heightened accessibility, cost-efficiency, and rapid responsiveness to patient needs, marking a significant leap forward in the field. By dynamically adjusting pod numbers based on latency, the system optimizes responsiveness, particularly in edge computing's proximity-based processing. This innovative fusion of technologies not only revolutionizes remote healthcare delivery but also enhances Kubernetes performance, preventing unresponsiveness during high usage.}, } @article {pmid38400486, year = {2024}, author = {Dhiman, P and Saini, N and Gulzar, Y and Turaev, S and Kaur, A and Nisa, KU and Hamid, Y}, title = {A Review and Comparative Analysis of Relevant Approaches of Zero Trust Network Model.}, journal = {Sensors (Basel, Switzerland)}, volume = {24}, number = {4}, pages = {}, pmid = {38400486}, issn = {1424-8220}, support = {This research was funded by the United Arab Emirates UAEU-ZU Joint Research Grant G00003819 (Fund No.: 12R138) Emirates Center for Mobility Research.//United Arab Emirates University/ ; }, abstract = {The Zero Trust safety architecture emerged as an intriguing approach for overcoming the shortcomings of standard network security solutions. This extensive survey study provides a meticulous explanation of the underlying principles of Zero Trust, as well as an assessment of the many strategies and possibilities for effective implementation. The survey begins by examining the role of authentication and access control within Zero Trust Architectures, and subsequently investigates innovative authentication and access control solutions across different scenarios. It then explores traditional techniques for encryption, micro-segmentation, and security automation, emphasizing their importance in achieving a secure Zero Trust environment. Zero Trust Architecture is explained in brief, along with the Taxonomy of Zero Trust Network Features. This review article provides useful insights into the Zero Trust paradigm, its approaches, problems, and future research objectives for scholars, practitioners, and policymakers.
This survey contributes to the growth and implementation of secure network architectures in critical infrastructures by developing a deeper knowledge of Zero Trust.}, } @article {pmid38400360, year = {2024}, author = {Li, W and Zhou, H and Lu, Z and Kamarthi, S}, title = {Navigating the Evolution of Digital Twins Research through Keyword Co-Occurence Network Analysis.}, journal = {Sensors (Basel, Switzerland)}, volume = {24}, number = {4}, pages = {}, pmid = {38400360}, issn = {1424-8220}, abstract = {Digital twin technology has become increasingly popular and has revolutionized data integration and system modeling across various industries, such as manufacturing, energy, and healthcare. This study aims to explore the evolving research landscape of digital twins using Keyword Co-occurrence Network (KCN) analysis. We analyze metadata from 9639 peer-reviewed articles published between 2000 and 2023. The results unfold in two parts. The first part examines trends and keyword interconnection over time, and the second part maps sensing technology keywords to six application areas. This study reveals that research on digital twins is rapidly diversifying, with focused themes such as predictive and decision-making functions. Additionally, there is an emphasis on real-time data and point cloud technologies. The advent of federated learning and edge computing also highlights a shift toward distributed computation, prioritizing data privacy. This study confirms that digital twins have evolved into complex systems that can conduct predictive operations through advanced sensing technologies. The discussion also identifies challenges in sensor selection and empirical knowledge integration.}, } @article {pmid38400338, year = {2024}, author = {Wiryasaputra, R and Huang, CY and Lin, YJ and Yang, CT}, title = {An IoT Real-Time Potable Water Quality Monitoring and Prediction Model Based on Cloud Computing Architecture.}, journal = {Sensors (Basel, Switzerland)}, volume = {24}, number = {4}, pages = {}, pmid = {38400338}, issn = {1424-8220}, support = {112-2622-E-029-003,112-2621-M-029-004, and 110-2221-E-029-020-MY3//the National Science and Technology Council (NSTC), Taiwan R.O.C./ ; }, mesh = {Humans ; Artificial Intelligence ; Cloud Computing ; *Drinking Water ; *Internet of Things ; Data Accuracy ; }, abstract = {In order to achieve the Sustainable Development Goals (SDG), it is imperative to ensure the safety of drinking water. The characteristics of each source of drinkable water, encompassing taste, aroma, and appearance, are unique. Inadequate water infrastructure and treatment can affect these features and may also threaten public health. This study utilizes the Internet of Things (IoT) in developing a monitoring system, particularly for water quality, to reduce the risk of contracting diseases. Water quality components data, such as water temperature, alkalinity or acidity, and contaminants, were obtained through a series of linked sensors. An Arduino microcontroller board acquired all the data, and the Narrow Band-IoT (NB-IoT) transmitted them to the web server. Due to limited human resources to observe the water quality physically, the monitoring was complemented by real-time notification alerts via a telephone text messaging application. The water quality data were monitored using Grafana in web mode, and binary machine learning classifiers were applied to predict whether the water was drinkable or not based on the collected data, which were stored in a database.
Non-decision-tree models, as well as the decision tree, were evaluated based on the improvements of the artificial intelligence framework. With a split of 60% for training, 20% for validation, and 10% for testing, the performance of the decision tree (DT) model was more prominent in comparison with the Gradient Boosting (GB), Random Forest (RF), Neural Network (NN), and Support Vector Machine (SVM) modeling approaches. Through the monitoring and prediction of results, the authorities can sample the water sources every two weeks.}, } @article {pmid38400323, year = {2024}, author = {Pan, S and Huang, C and Fan, J and Shi, Z and Tong, J and Wang, H}, title = {Optimizing Internet of Things Fog Computing: Through Lyapunov-Based Long Short-Term Memory Particle Swarm Optimization Algorithm for Energy Consumption Optimization.}, journal = {Sensors (Basel, Switzerland)}, volume = {24}, number = {4}, pages = {}, pmid = {38400323}, issn = {1424-8220}, abstract = {In the era of continuous development in Internet of Things (IoT) technology, smart services are penetrating various facets of societal life, leading to a growing demand for interconnected devices. Many contemporary devices are no longer mere data producers but also consumers of data. As a result, massive amounts of data are transmitted to the cloud, but the latency generated in edge-to-cloud communication is unacceptable for many tasks. In response to this, this paper introduces a novel contribution: a layered computing network built on the principles of fog computing, accompanied by a newly devised algorithm designed to optimize user tasks and allocate computing resources within rechargeable networks. The proposed algorithm, a synergy of Lyapunov-based control, dynamic Long Short-Term Memory (LSTM) networks, and Particle Swarm Optimization (PSO), allows for predictive task allocation. The fog servers dynamically train LSTM networks to effectively forecast the data features of user tasks, facilitating proper offload decisions based on task priorities. In response to the challenge of slower hardware upgrades in edge devices compared to user demands, the algorithm optimizes the utilization of low-power devices and addresses performance limitations. Additionally, this paper considers the unique characteristics of rechargeable networks, where computing nodes acquire energy through charging. Utilizing Lyapunov functions for dynamic resource control enables nodes with abundant resources to maximize their potential, significantly reducing energy consumption and enhancing overall performance. The simulation results demonstrate that our algorithm surpasses traditional methods in terms of energy efficiency and resource allocation optimization. Despite limitations in the prediction accuracy of the Fog Servers (FS), the proposed approach significantly improves overall performance. The proposed approach improves the efficiency and the user experience of Internet of Things systems in terms of latency and energy consumption.}, } @article {pmid38400319, year = {2024}, author = {Brata, KC and Funabiki, N and Panduman, YYF and Fajrianti, ED}, title = {An Enhancement of Outdoor Location-Based Augmented Reality Anchor Precision through VSLAM and Google Street View.}, journal = {Sensors (Basel, Switzerland)}, volume = {24}, number = {4}, pages = {}, pmid = {38400319}, issn = {1424-8220}, abstract = {Outdoor Location-Based Augmented Reality (LAR) applications require precise positioning for seamless integration of virtual content into immersive experiences.
However, common solutions in outdoor LAR applications rely on traditional smartphone sensor fusion methods, such as the Global Positioning System (GPS) and compasses, which often lack the accuracy needed for precise AR content alignment. In this paper, we introduce an innovative approach to enhance LAR anchor precision in outdoor environments. We leveraged Visual Simultaneous Localization and Mapping (VSLAM) technology, in combination with innovative cloud-based methodologies, and harnessed the extensive visual reference database of Google Street View (GSV) to address these accuracy limitations. For the evaluation, 10 Point of Interest (POI) locations were used as anchor point coordinates in the experiments. We comprehensively compared the accuracy of our approach against the common sensor fusion LAR solution, covering accuracy benchmarking and running-load performance testing. The results demonstrate substantial enhancements in overall positioning accuracy compared to conventional GPS-based approaches for aligning AR anchor content in the real world.}, } @article {pmid38376453, year = {2024}, author = {Horstmann, A and Riggs, S and Chaban, Y and Clare, DK and de Freitas, G and Farmer, D and Howe, A and Morris, KL and Hatton, D}, title = {A service-based approach to cryoEM facility processing pipelines at eBIC.}, journal = {Acta crystallographica. Section D, Structural biology}, volume = {80}, number = {Pt 3}, pages = {174-180}, pmid = {38376453}, issn = {2059-7983}, mesh = {*Software ; *Image Processing, Computer-Assisted/methods ; Cryoelectron Microscopy/methods ; Workflow ; Cloud Computing ; }, abstract = {Electron cryo-microscopy image-processing workflows are typically composed of elements that may, broadly speaking, be categorized as high-throughput workloads which transition to high-performance workloads as preprocessed data are aggregated. The high-throughput elements are of particular importance in the context of live processing, where an optimal response is highly coupled to the temporal profile of the data collection. In other words, each movie should be processed as quickly as possible at the earliest opportunity. The high level of disconnected parallelization in the high-throughput problem directly allows a completely scalable solution across a distributed computer system, with the only technical obstacle being an efficient and reliable implementation. The cloud computing frameworks primarily developed for the deployment of high-availability web applications provide an environment with a number of appealing features for such high-throughput processing tasks.
Here, an implementation of an early-stage processing pipeline for electron cryotomography experiments using a service-based architecture deployed on a Kubernetes cluster is discussed in order to demonstrate the benefits of this approach and how it may be extended to scenarios of considerably increased complexity.}, } @article {pmid38370642, year = {2024}, author = {McMurry, AJ and Gottlieb, DI and Miller, TA and Jones, JR and Atreja, A and Crago, J and Desai, PM and Dixon, BE and Garber, M and Ignatov, V and Kirchner, LA and Payne, PRO and Saldanha, AJ and Shankar, PRV and Solad, YV and Sprouse, EA and Terry, M and Wilcox, AB and Mandl, KD}, title = {Cumulus: A federated EHR-based learning system powered by FHIR and AI.}, journal = {medRxiv : the preprint server for health sciences}, volume = {}, number = {}, pages = {}, pmid = {38370642}, support = {NU38OT000286/OT/OSTLTS CDC HHS/United States ; U01 TR002623/TR/NCATS NIH HHS/United States ; U01 TR002997/TR/NCATS NIH HHS/United States ; U18 DP006500/DP/NCCDPHP CDC HHS/United States ; }, abstract = {OBJECTIVE: To address challenges in large-scale electronic health record (EHR) data exchange, we sought to develop, deploy, and test an open source, cloud-hosted app 'listener' that accesses standardized data across the SMART/HL7 Bulk FHIR Access application programming interface (API).

METHODS: We advance a model for scalable, federated data sharing and learning. Cumulus software is designed to address key technology and policy desiderata, including local utility, control, and administrative simplicity, as well as privacy preservation during robust data sharing, and AI for processing unstructured text.

RESULTS: Cumulus relies on containerized, cloud-hosted software, installed within a healthcare organization's security envelope. Cumulus accesses EHR data via the Bulk FHIR interface and streamlines automated processing and sharing. The modular design enables use of the latest AI and natural language processing tools and supports provider autonomy and administrative simplicity. In an initial test, Cumulus was deployed across five healthcare systems, each partnered with public health. Cumulus output is patient counts, which were aggregated into a table stratified by variables of interest to enable population health studies. All code is available open source. A policy stipulating that only aggregate data leave the institution greatly facilitated data sharing agreements.
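
To make the Bulk FHIR access pattern described above concrete, the following is a minimal Python sketch of the SMART/HL7 Bulk Data kick-off-and-poll flow; the endpoint, group ID, and token are hypothetical, and the actual Cumulus listener is considerably more elaborate.

    import time
    import requests

    FHIR_BASE = "https://ehr.example.org/fhir"  # hypothetical endpoint
    TOKEN = "..."  # in practice obtained via SMART Backend Services authorization

    def bulk_export(group_id):
        """Kick off a group-level bulk export and poll until the files are ready."""
        headers = {
            "Authorization": "Bearer " + TOKEN,
            "Accept": "application/fhir+json",
            "Prefer": "respond-async",
        }
        # Kick-off request: the server replies 202 Accepted with a
        # Content-Location header pointing at a status endpoint.
        kickoff = requests.get(
            FHIR_BASE + "/Group/" + group_id + "/$export",
            headers=headers,
            params={"_type": "Patient,Condition,Observation"},
        )
        kickoff.raise_for_status()
        status_url = kickoff.headers["Content-Location"]

        # Poll until the export completes (HTTP 200 with an output manifest).
        while True:
            status = requests.get(status_url, headers={"Authorization": "Bearer " + TOKEN})
            if status.status_code == 200:
                return status.json()["output"]  # list of {type, url} NDJSON file entries
            time.sleep(int(status.headers.get("Retry-After", "30")))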

DISCUSSION AND CONCLUSION: Cumulus addresses barriers to data sharing based on (1) federally required support for standard APIs, (2) increasing use of cloud computing, and (3) advances in AI. There is potential for scalability to support learning across myriad network configurations and use cases.}, } @article {pmid38370229, year = {2024}, author = {Yadav, N and Pattabiraman, B and Tummuru, NR and Soundharajan, BS and Kasiviswanathan, KS and Adeloye, AJ and Sen, S and Maurya, M and Vijayalakshmanan, S}, title = {Toward improving water-energy-food nexus through dynamic energy management of solar powered automated irrigation system.}, journal = {Heliyon}, volume = {10}, number = {4}, pages = {e25359}, pmid = {38370229}, issn = {2405-8440}, abstract = {This paper focuses on developing a water- and energy-saving reliable irrigation system using a state-of-the-art computing, communication, and optimal energy management framework. The framework integrates real-time soil moisture and weather forecasting information to decide the time of irrigation and quantity of water required for potato crops, which is made available to the users across a region through the cloud-based irrigation decision support system. This is accomplished through various modules such as data acquisition, soil moisture forecasting, smart irrigation scheduling, and an energy management scheme. The main emphasis is on the electrical segment, which demonstrates an energy management scheme for a PV-battery-based grid-connected system to operate the irrigation system valves and water pump. The proposed scheme is verified through simulation and dSpace-based real-time experiment studies. Overall, the proposed energy management system demonstrates an improvement in the optimal onsite solar power generation and storage capacity to power the solar pump, which saves electrical energy as well as water in order to establish an improved solar-irrigation system. Finally, the proposed system achieved water and energy savings of around 9.24 % for the potato crop with full irrigation, enhancing the Water-Energy-Food Nexus at field scale.}, } @article {pmid38365804, year = {2024}, author = {Beteri, J and Lyimo, JG and Msinde, JV}, title = {The influence of climatic and environmental variables on sunflower planting season suitability in Tanzania.}, journal = {Scientific reports}, volume = {14}, number = {1}, pages = {3906}, pmid = {38365804}, issn = {2045-2322}, mesh = {Seasons ; *Helianthus ; Tanzania ; Temperature ; Plants ; *Asteraceae ; }, abstract = {Crop survival and growth require identification of correlations between the appropriate planting season and relevant climatic and environmental characteristics. Climatic and environmental conditions may cause water and heat stress at critical stages of crop development, thus affecting planting suitability. Consequently, this may affect crop yield and productivity. This study assesses the influence of climate and environmental variables on rain-fed sunflower planting season suitability in Tanzania. Data on rainfall, temperature, slope, elevation, soil and land use/cover were accessed from publicly available sources using Google Earth Engine, a cloud-based geospatial computing platform for remotely sensed datasets. The Tanzania sunflower production calendar of 2022 was adopted to mark the start and end limits of planting across the country. The default climate and environmental parameters from the FAO database were used.
In addition, Pearson correlation was used to evaluate the relationship of rainfall and temperature with the Normalized Difference Vegetation Index (NDVI) from 2000 to 2020 at five-year intervals for January-April and June-September, for the high- and poor-suitability seasons. The results showed that the planting suitability of sunflower in Tanzania is driven more by rainfall than temperature. It was revealed that intra-annual planting suitability increases gradually from the short- to the long-rain season and diminishes towards the dry season of the year. The January-April planting season window showed the highest suitability (41.65%), whereas June-September indicated the lowest suitability (0.05%). Though not statistically significant, rainfall and NDVI were positively correlated (r = 0.65 and 0.75), whereas temperature and NDVI were negatively correlated (r = -0.6 and -0.77). We recommend sunflower subsector interventions that consider appropriate intra-regional and seasonal diversity as an important adaptive mechanism to ensure high sunflower yields.}, } @article {pmid38360949, year = {2024}, author = {Periola, AA and Alonge, AA and Ogudo, KA}, title = {Ocean warming events resilience capability in underwater computing platforms.}, journal = {Scientific reports}, volume = {14}, number = {1}, pages = {3781}, pmid = {38360949}, issn = {2045-2322}, abstract = {Underwater data centers (UDCs) use the ocean's cold-water resources for free cooling and have low cooling costs. However, UDC cooling is affected by marine heat waves and underwater seismic events, thereby affecting UDC operational continuity. Though feasible, the use of reservoirs for UDC cooling is non-scalable due to the high computing overhead and the inability to support continuity for long-duration marine heat waves. The presented research proposes a mobile UDC (capable of migration) to address this challenge. The proposed UDC migrates from ocean regions with high underwater ground displacement to regions having no or small underwater ground displacement. It supports multiple client underwater applications without requiring clients to develop, deploy, and launch their own UDCs. The manner of resource utilization is influenced by the client's service level agreement. Hence, the proposed UDC provides resilient services to clients and their applications. Analysis shows that using the mobile UDC instead of the existing reservoir UDC approach enhances the operational duration and power usage effectiveness by 8.9-48.5% and 55.6-70.7% on average, respectively.
In addition, the overhead is reduced by an average of 95.8-99.4%.}, } @article {pmid38355983, year = {2024}, author = {Kashyap, P and Shivgan, K and Patil, S and Raja, BR and Mahajan, S and Banerjee, S and Tallur, S}, title = {Unsupervised deep learning framework for temperature-compensated damage assessment using ultrasonic guided waves on edge device.}, journal = {Scientific reports}, volume = {14}, number = {1}, pages = {3751}, pmid = {38355983}, issn = {2045-2322}, support = {RD/0118-ISROC00-006//Indian Space Research Organisation/ ; CRG/2021/001959//Science and Engineering Research Board/ ; }, abstract = {Fueled by the rapid development of machine learning (ML) and greater access to cloud computing and graphics processing units, various deep learning-based models have been proposed for improving the performance of ultrasonic guided wave structural health monitoring (GW-SHM) systems, especially to counter complexity and heterogeneity in data due to varying environmental factors (e.g., temperature) and types of damages. Such models typically comprise millions of trainable parameters, and therefore add to the cost of deployment due to requirements of cloud connectivity and processing, thus limiting the scale of deployment of GW-SHM. In this work, we propose an alternative solution that leverages the TinyML framework for development of light-weight ML models that could be directly deployed on embedded edge devices. The utility of our solution is illustrated by presenting an unsupervised learning framework for damage detection in a honeycomb composite sandwich structure with disbond- and delamination-type damages, validated using data generated by finite element simulations and experiments performed at various temperatures in the range 0-90 °C. We demonstrate a fully-integrated solution using a Xilinx Artix-7 FPGA for data acquisition and control, and edge-inference of damage. Despite the limited number of features, the lightweight model shows reasonably high accuracy, thereby enabling detection of small-size defects with improved sensitivity on an edge device for online GW-SHM.}, } @article {pmid38351164, year = {2024}, author = {Feng, Q and Niu, B and Ren, Y and Su, S and Wang, J and Shi, H and Yang, J and Han, M}, title = {A 10-m national-scale map of ground-mounted photovoltaic power stations in China of 2020.}, journal = {Scientific data}, volume = {11}, number = {1}, pages = {198}, pmid = {38351164}, issn = {2052-4463}, support = {42001367//National Natural Science Foundation of China (National Science Foundation of China)/ ; }, abstract = {We provide a remote sensing derived dataset for large-scale ground-mounted photovoltaic (PV) power stations in China of 2020, which has a high spatial resolution of 10 meters. The dataset is based on the Google Earth Engine (GEE) cloud computing platform via a random forest classifier and an active learning strategy. Specifically, ground samples are carefully collected across China via both field survey and visual interpretation. Afterwards, spectral and texture features are calculated from publicly available Sentinel-2 imagery. Meanwhile, topographic features consisting of slope and aspect that are sensitive to PV locations are also included, aiming to construct a multi-dimensional and discriminative feature space. Finally, the trained random forest model is adopted to predict PV power stations of China in parallel on GEE. Technical validation was carefully performed across China, achieving a satisfactory accuracy of over 89%.
Above all, as the first publicly released 10-m national-scale distribution dataset of China's ground-mounted PV power stations, it can provide data references for relevant researchers in fields such as energy, land, remote sensing and environmental sciences.}, } @article {pmid38351065, year = {2024}, author = {Chuntakaruk, H and Hengphasatporn, K and Shigeta, Y and Aonbangkhen, C and Lee, VS and Khotavivattana, T and Rungrotmongkol, T and Hannongbua, S}, title = {FMO-guided design of darunavir analogs as HIV-1 protease inhibitors.}, journal = {Scientific reports}, volume = {14}, number = {1}, pages = {3639}, pmid = {38351065}, issn = {2045-2322}, mesh = {Humans ; Darunavir/pharmacology ; *HIV Protease Inhibitors/pharmacology/chemistry ; *HIV-1/genetics ; Molecular Docking Simulation ; Sulfonamides/pharmacology ; *HIV Infections ; Viral Proteins/genetics ; HIV Protease/metabolism ; Mutation ; Drug Resistance, Viral/genetics ; }, abstract = {The prevalence of HIV-1 infection continues to pose a significant global public health issue, highlighting the need for antiretroviral drugs that target viral proteins to reduce viral replication. One such target is HIV-1 protease (PR), responsible for cleaving viral polyproteins, leading to the maturation of viral proteins. While darunavir (DRV) is a potent HIV-1 PR inhibitor, drug resistance can arise due to mutations in HIV-1 PR. To address this issue, we developed a novel approach using the fragment molecular orbital (FMO) method and structure-based drug design to create DRV analogs. Using combinatorial programming, we generated novel analogs, freely accessible via the Combined Analog generator Tool (CAT), an on-the-cloud mode implemented in Google Colab. The designed analogs underwent cascade screening through molecular docking with HIV-1 PR wild-type and major mutations at the active site. Molecular dynamics (MD) simulations were used to assess the ligand binding and susceptibility of the screened designed analogs. Our findings indicate that the three designed analogs guided by FMO, 19-0-14-3, 19-8-10-0, and 19-8-14-3, are superior to DRV and have the potential to serve as efficient PR inhibitors. These findings demonstrate the effectiveness of our approach and its potential to be used in further studies for developing new antiretroviral drugs.}, } @article {pmid38350039, year = {2024}, author = {Bell, J and Decker, B and Eichmann, A and Palkovich, C and Reji, C}, title = {Effectiveness of Virtual Reality for Upper Extremity Function and Motor Performance of Children With Cerebral Palsy: A Systematic Review.}, journal = {The American journal of occupational therapy : official publication of the American Occupational Therapy Association}, volume = {78}, number = {2}, pages = {}, doi = {10.5014/ajot.2024.050374}, pmid = {38350039}, issn = {0272-9490}, mesh = {Humans ; *Cerebral Palsy/rehabilitation/physiopathology ; *Upper Extremity/physiopathology ; Child ; *Virtual Reality ; *Motor Skills ; *Occupational Therapy/methods ; Adolescent ; }, abstract = {IMPORTANCE: Research on the functional and motor performance impact of virtual reality (VR) as an intervention tool for children with cerebral palsy (CP) is limited.

OBJECTIVE: To understand whether VR is an effective intervention to improve upper extremity (UE) function and motor performance of children diagnosed with CP.

DATA SOURCES: Databases used in the search were EBSCOhost, One Search, PubMed, Cloud Source, CINAHL, SPORTDiscus, and Google Scholar.

Studies published from 2006 to 2021 were included if children had a diagnosis of CP and were age 21 yr or younger, VR was used as an intervention, and measures of UE function and motor performance were used.

FINDINGS: Twenty-one studies were included, and the results provided promising evidence for improvements in areas of UE function, motor performance, and fine motor skills when VR is used as an intervention. To yield noticeable UE improvements in children with CP, VR should be implemented for 30 to 60 min/session and for at least 360 min over more than 3 wk. Additional areas of improvement include gross motor skills, functional mobility, occupational performance, and intrinsic factors.

CONCLUSIONS AND RELEVANCE: The use of VR as an intervention for children with CP to improve UE function and motor performance is supported. More randomized controlled trials with larger sample sizes focusing on similar outcomes and intervention frequencies are needed to determine the most effective type of VR for use in clinical occupational therapy. Plain-Language Summary: This systematic review explains how virtual reality (VR) has been used as an intervention with children with cerebral palsy (CP). The review synthesizes the results of 21 research studies of children who had a diagnosis of CP and who were 21 years old or younger. The findings support using VR to improve upper extremity performance, motor performance, and fine motor skills. The findings also show that occupational therapy practitioners should use a VR intervention at a minimum frequency of 30 to 60 minutes per session and for at least 360 minutes over more than 3 weeks to yield noticeable improvements in upper extremity, motor performance, and fine motor skills for children with CP.}, } @article {pmid38347885, year = {2024}, author = {Bhattacharjee, T and Kiwuwa-Muyingo, S and Kanjala, C and Maoyi, ML and Amadi, D and Ochola, M and Kadengye, D and Gregory, A and Kiragga, A and Taylor, A and Greenfield, J and Slaymaker, E and Todd, J and , }, title = {INSPIRE datahub: a pan-African integrated suite of services for harmonising longitudinal population health data using OHDSI tools.}, journal = {Frontiers in digital health}, volume = {6}, number = {}, pages = {1329630}, pmid = {38347885}, issn = {2673-253X}, abstract = {INTRODUCTION: Population health data integration remains a critical challenge in low- and middle-income countries (LMIC), hindering the generation of actionable insights to inform policy and decision-making. This paper proposes a pan-African, Findable, Accessible, Interoperable, and Reusable (FAIR) research architecture and infrastructure named the INSPIRE datahub. This cloud-based Platform-as-a-Service (PaaS) and on-premises setup aims to enhance the discovery, integration, and analysis of clinical, population-based surveys, and other health data sources.

METHODS: The INSPIRE datahub, part of the Implementation Network for Sharing Population Information from Research Entities (INSPIRE), employs the Observational Health Data Sciences and Informatics (OHDSI) open-source stack of tools and the Observational Medical Outcomes Partnership (OMOP) Common Data Model (CDM) to harmonise data from African longitudinal population studies. Operating on Microsoft Azure and Amazon Web Services cloud platforms, and on on-premises servers, the architecture offers adaptability and scalability for other cloud providers and technology infrastructure. The OHDSI-based tools enable a comprehensive suite of services for data pipeline development, profiling, mapping, extraction, transformation, loading, documentation, anonymization, and analysis.

RESULTS: The INSPIRE datahub's "On-ramp" services facilitate the integration of data and metadata from diverse sources into the OMOP CDM. The datahub supports the implementation of OMOP CDM across data producers, harmonizing source data semantically with standard vocabularies and structurally conforming to OMOP table structures. Leveraging OHDSI tools, the datahub performs quality assessment and analysis of the transformed data. It ensures FAIR data by establishing metadata flows, capturing provenance throughout the ETL processes, and providing accessible metadata for potential users. The ETL provenance is documented in a machine- and human-readable Implementation Guide (IG), enhancing transparency and usability.
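
As an illustration of the kind of OMOP CDM harmonisation described above, the sketch below loads a single source record into an OMOP person table; the source-side field names are hypothetical, while the column names and the standard gender concept IDs (8507 male, 8532 female) follow the OMOP vocabulary. It is a toy, not the datahub's actual ETL.

    import sqlite3

    GENDER_CONCEPTS = {"M": 8507, "F": 8532}  # OMOP standard concepts

    def load_person(conn, source):
        """Map one (hypothetical) survey record onto the OMOP person table."""
        conn.execute(
            """INSERT INTO person
               (person_id, gender_concept_id, year_of_birth,
                race_concept_id, ethnicity_concept_id, person_source_value)
               VALUES (?, ?, ?, 0, 0, ?)""",
            (
                source["participant_id"],               # hypothetical source field
                GENDER_CONCEPTS.get(source["sex"], 0),  # 0 = "No matching concept"
                source["birth_year"],
                str(source["participant_id"]),
            ),
        )

    conn = sqlite3.connect(":memory:")
    conn.execute("""CREATE TABLE person (
        person_id INTEGER PRIMARY KEY, gender_concept_id INTEGER,
        year_of_birth INTEGER, race_concept_id INTEGER,
        ethnicity_concept_id INTEGER, person_source_value TEXT)""")
    load_person(conn, {"participant_id": 1, "sex": "F", "birth_year": 1984})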

CONCLUSION: The pan-African INSPIRE datahub presents a scalable and systematic solution for integrating health data in LMICs. By adhering to FAIR principles and leveraging established standards like OMOP CDM, this architecture addresses the current gap in generating evidence to support policy and decision-making for improving the well-being of LMIC populations. The federated research network provisions allow data producers to maintain control over their data, fostering collaboration while respecting data privacy and security concerns. A use-case demonstrated the pipeline using OHDSI and other open-source tools.}, } @article {pmid38345858, year = {2024}, author = {Zandesh, Z}, title = {Privacy, Security, and Legal Issues in the Health Cloud: Structured Review for Taxonomy Development.}, journal = {JMIR formative research}, volume = {8}, number = {}, pages = {e38372}, pmid = {38345858}, issn = {2561-326X}, abstract = {BACKGROUND: Privacy in our digital world is a very complicated topic, especially when meeting cloud computing technological achievements with its multidimensional context. Here, privacy is an extended concept that is sometimes referred to as legal, philosophical, or even technical. Consequently, there is a need to harmonize it with other aspects in health care in order to provide a new ecosystem. This new ecosystem can lead to a paradigm shift involving the reconstruction and redesign of some of the most important and essential requirements like privacy concepts, legal issues, and security services. Cloud computing in the health domain has markedly contributed to other technologies, such as mobile health, health Internet of Things, and wireless body area networks, with their increasing numbers of embedded applications. Other dependent applications, which are usually used in health businesses like social networks, or some newly introduced applications have issues regarding privacy transparency boundaries and privacy-preserving principles, which have made policy making difficult in the field.

OBJECTIVE: One way to overcome this challenge is to develop a taxonomy to identify all relevant factors. A taxonomy serves to bring conceptual clarity to the set of alternatives in in-person health care delivery. This study aimed to construct a comprehensive taxonomy for privacy in the health cloud, which also provides a prospective landscape for privacy in related technologies.

METHODS: A search was performed for relevant published English papers in databases, including Web of Science, IEEE Digital Library, Google Scholar, Scopus, and PubMed. A total of 2042 papers were related to the health cloud privacy concept according to predefined keywords and search strings. Taxonomy design was performed using a deductive methodology.

RESULTS: This taxonomy has 3 layers. The first layer has 4 main dimensions, including cloud, data, device, and legal. The second layer has 15 components, and the final layer has related subcategories (n=57). This taxonomy covers some related concepts, such as privacy, security, confidentiality, and legal issues, which are categorized here and defined by their expansion and distinctive boundaries. The main merits of this taxonomy are its ability to clarify privacy terms for different scenarios and to signal the multidisciplinary objectification of privacy in eHealth.
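
One way to picture the reported three-layer structure is as a nested mapping; in the sketch below the four first-layer dimensions come from the abstract, while the example components and subcategories are placeholders rather than the paper's actual 15-component, 57-subcategory breakdown.

    # Illustrative representation of a 3-layer taxonomy: dimension -> component -> subcategories.
    health_cloud_privacy_taxonomy = {
        "cloud": {"service model": ["SaaS", "PaaS", "IaaS"]},               # placeholders
        "data": {"data lifecycle": ["collection", "storage", "sharing"]},
        "device": {"endpoint type": ["mobile health", "IoT sensor", "wearable"]},
        "legal": {"regulation": ["consent", "jurisdiction", "liability"]},
    }

    def subcategory_count(taxonomy):
        """Count leaf subcategories across all dimensions and components."""
        return sum(len(subs) for dim in taxonomy.values() for subs in dim.values())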

CONCLUSIONS: This taxonomy can cover health industry requirements with its specifications like health data and scenarios, which are considered the most complicated among businesses and industries. Therefore, the use of this taxonomy could be generalized and customized to other domains and businesses that have fewer complications. Moreover, this taxonomy has different stakeholders, including people, organizations, and systems. If the antecedent effort in the taxonomy is proven, subject matter experts could enhance the extent of privacy in the health cloud by verifying, evaluating, and revising this taxonomy.}, } @article {pmid38344670, year = {2024}, author = {Simpson, RL and Lee, JA and Li, Y and Kang, YJ and Tsui, C and Cimiotti, JP}, title = {Medicare meets the cloud: the development of a secure platform for the storage and analysis of claims data.}, journal = {JAMIA open}, volume = {7}, number = {1}, pages = {ooae007}, pmid = {38344670}, issn = {2574-2531}, abstract = {INTRODUCTION: Cloud-based solutions are a modern-day necessity for data-intensive computing. This case report describes in detail the development and implementation of Amazon Web Services (AWS) at Emory: a secure, reliable, and scalable platform to store and analyze identifiable research data from the Centers for Medicare and Medicaid Services (CMS).

MATERIALS AND METHODS: Interdisciplinary teams from CMS, MBL Technologies, and Emory University collaborated to ensure compliance with CMS policy that consolidates laws, regulations, and other drivers of information security and privacy.

RESULTS: A dedicated team of individuals ensured successful transition from a physical storage server to a cloud-based environment. This included implementing access controls, vulnerability scanning, and audit logs that are reviewed regularly with a remediation plan. User adaptation required specific training to overcome the challenges of cloud computing.
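
For illustration, the following boto3 sketch applies two controls of the kind this case report describes: default encryption and a public-access block on an S3 bucket. The bucket name is hypothetical, and the real platform additionally involves IAM policies, vulnerability scanning, and regular audit-log review.

    import boto3

    s3 = boto3.client("s3")
    BUCKET = "example-cms-claims-enclave"  # hypothetical bucket name

    # Enforce default server-side encryption with a KMS key.
    s3.put_bucket_encryption(
        Bucket=BUCKET,
        ServerSideEncryptionConfiguration={
            "Rules": [{"ApplyServerSideEncryptionByDefault": {"SSEAlgorithm": "aws:kms"}}]
        },
    )

    # Block all forms of public access to the bucket.
    s3.put_public_access_block(
        Bucket=BUCKET,
        PublicAccessBlockConfiguration={
            "BlockPublicAcls": True,
            "IgnorePublicAcls": True,
            "BlockPublicPolicy": True,
            "RestrictPublicBuckets": True,
        },
    )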

CONCLUSION: Challenges created opportunities for lessons learned through the creation of an end-product accepted by CMS and shared across disciplines university-wide.}, } @article {pmid38339714, year = {2024}, author = {González-Herbón, R and González-Mateos, G and Rodríguez-Ossorio, JR and Domínguez, M and Alonso, S and Fuertes, JJ}, title = {An Approach to Develop Digital Twins in Industry.}, journal = {Sensors (Basel, Switzerland)}, volume = {24}, number = {3}, pages = {}, pmid = {38339714}, issn = {1424-8220}, support = {Grant PID2020-117890RB-I00//Ministerio de Ciencia e Innovación/ ; }, abstract = {The industry is currently undergoing a digital revolution driven by the integration of several enabling technologies. These include automation, robotics, cloud computing, industrial cybersecurity, systems integration, digital twins, etc. Of particular note is the increasing use of digital twins, which offer significant added value by providing realistic and fully functional process simulations. This paper proposes an approach for developing digital twins in industrial environments. The novelty lies in not only focusing on obtaining the model of the industrial system and integrating virtual reality and/or augmented reality but also in emphasizing the importance of incorporating other enabled technologies of Industry 4.0, such as system integration, connectivity with standard and specific industrial protocols, cloud services, or new industrial automation systems, to enhance the capabilities of the digital twin. Furthermore, a proposal of the software tools that can be used to achieve this incorporation is made. Unity is chosen as the real-time 3D development tool for its cross-platform capability and streamlined industrial system modeling. The integration of augmented reality is facilitated by the Vuforia SDK. Node-RED is selected as the system integration option, and communications are carried out with MQTT protocol. Finally, cloud-based services are recommended for effective data storage and processing. Furthermore, this approach has been used to develop a digital twin of a robotic electro-pneumatic cell.}, } @article {pmid38339672, year = {2024}, author = {Lu, Y and Zhou, L and Zhang, A and Zha, S and Zhuo, X and Ge, S}, title = {Application of Deep Learning and Intelligent Sensing Analysis in Smart Home.}, journal = {Sensors (Basel, Switzerland)}, volume = {24}, number = {3}, pages = {}, pmid = {38339672}, issn = {1424-8220}, abstract = {Deep learning technology can improve sensing efficiency and has the ability to discover potential patterns in data; the efficiency of user behavior recognition in the field of smart homes has been further improved, making the recognition process more intelligent and humanized. This paper analyzes the optical sensors commonly used in smart homes and their working principles through case studies and explores the technical framework of user behavior recognition based on optical sensors. At the same time, CiteSpace (Basic version 6.2.R6) software is used to visualize and analyze the related literature, elaborate the main research hotspots and evolutionary changes of optical sensor-based smart home user behavior recognition, and summarize the future research trends. 
Finally, fully utilizing the advantages of cloud computing technology, such as scalability and on-demand services, and combining typical life situations with the requirements of smart home users, a smart home data collection and processing framework based on elderly fall monitoring scenarios is designed. Based on the comprehensive research results, the application and positive impact of optical sensors in smart home user behavior recognition were analyzed, and inspiration was provided for future smart home user experience research.}, } @article {pmid38339591, year = {2024}, author = {Ehtisham, M and Hassan, MU and Al-Awady, AA and Ali, A and Junaid, M and Khan, J and Abdelrahman Ali, YA and Akram, M}, title = {Internet of Vehicles (IoV)-Based Task Scheduling Approach Using Fuzzy Logic Technique in Fog Computing Enables Vehicular Ad Hoc Network (VANET).}, journal = {Sensors (Basel, Switzerland)}, volume = {24}, number = {3}, pages = {}, pmid = {38339591}, issn = {1424-8220}, support = {NU/IFC/2/SERC/-/47//Najran University/ ; Authors would like to acknowledge the support of the Deputy for Research and Innovation Ministry of Education, Kingdom of Saudi Arabia, for this research through a grant (NU/IFC/2/SERC/-/47) under the Institutional Funding Committee at Najran University,//Najran University/ ; }, abstract = {The intelligent transportation system (ITS) relies heavily on the vehicular ad hoc network (VANET) and the internet of vehicles (IoVs), which combine cloud and fog to improve task processing capabilities. As a cloud extension, the fog infrastructure sits close to the VANET, fostering an environment favorable to smart cars with IT equipment and effective task management oversight. Vehicle processing power, bandwidth, time, and high-speed mobility are all limited in VANET. It is critical to satisfy the vehicles' requirements for minimal latency and fast reaction times while offloading duties to the fog layer. We proposed a fuzzy logic-based task scheduling system in VANET to minimize latency and enhance response time when offloading tasks in the IoV. The proposed method effectively transfers workloads to the fog computing layer while considering the constrained resources of car nodes. After choosing a suitable processing unit, the algorithm sends the job and its associated resources to the fog layer. The dataset contains over 5000 crisp values for fog computing, covering system utilization, latency, and task deadline time. Task execution, latency, task deadline, storage, CPU, and bandwidth utilization are used as fuzzy set values. We proved the effectiveness of our proposed task scheduling framework via simulation tests, outperforming current algorithms in terms of task ratio by 13%, decreasing average turnaround time by 9%, minimizing makespan time by 15%, and effectively overcoming average latency time within the network parameters.
The proposed technique shows better results and responses than previous techniques by scheduling tasks toward the fog layer with lower response times, minimizing the overall time from task submission to completion.}, } @article {pmid38339582, year = {2024}, author = {Hassan, MU and Al-Awady, AA and Ali, A and Iqbal, MM and Akram, M and Jamil, H}, title = {Smart Resource Allocation in Mobile Cloud Next-Generation Network (NGN) Orchestration with Context-Aware Data and Machine Learning for the Cost Optimization of Microservice Applications.}, journal = {Sensors (Basel, Switzerland)}, volume = {24}, number = {3}, pages = {}, pmid = {38339582}, issn = {1424-8220}, support = {NU/IFC/2/SERC/-/47//Deputy for Research and Innovation Ministry of Education, Kingdom of Saudi Arabia/ ; }, abstract = {Mobile cloud computing (MCC) provides resources to users to handle smart mobile applications. In MCC, task scheduling is the solution for mobile users' context-aware computation resource-rich applications. Most existing approaches have achieved a moderate service reliability rate due to a lack of instance-centric resource estimations and task offloading, a statistical NP-hard problem. The current intelligent scheduling process cannot address NP-hard problems due to traditional task offloading approaches. To address this problem, the authors design an efficient context-aware service offloading approach based on instance-centric measurements. The revised machine learning model/algorithm employs task adaptation to make decisions regarding task offloading. The proposed MCVS scheduling algorithm predicts the usage rates of individual microservices for a practical task scheduling scheme, considering mobile device time, cost, network, location, and central processing unit (CPU) power to train data. One notable feature of the microservice software architecture is its capacity to facilitate the scalability, flexibility, and independent deployment of individual components. A series of simulation results show the efficiency of the proposed technique based on offloading, CPU usage, and execution time metrics. The experimental results efficiently show the learning rate in training and testing in comparison with existing approaches, showing efficient training and task offloading phases. The proposed system has lower costs and uses less energy to offload microservices in MCC. Graphical results are presented to define the effectiveness of the proposed model. For a service arrival rate of 80%, the proposed model achieves an average 4.5% service offloading rate and 0.18% CPU usage rate compared with state-of-the-art approaches. The proposed method demonstrates efficiency in terms of cost and energy savings for microservice offloading in mobile cloud computing (MCC).}, } @article {pmid38339552, year = {2024}, author = {Parracciani, C and Gigante, D and Bonini, F and Grassi, A and Morbidini, L and Pauselli, M and Valenti, B and Lilli, E and Antonielli, F and Vizzari, M}, title = {Leveraging Google Earth Engine for a More Effective Grassland Management: A Decision Support Application Perspective.}, journal = {Sensors (Basel, Switzerland)}, volume = {24}, number = {3}, pages = {}, pmid = {38339552}, issn = {1424-8220}, mesh = {Animals ; Humans ; *Ecosystem ; *Grassland ; Search Engine ; Biodiversity ; Agriculture ; Livestock ; }, abstract = {Grasslands cover a substantial portion of the earth's surface and agricultural land and are crucial for human well-being and livestock farming.
Ranchers and grassland management authorities face challenges in effectively controlling herders' grazing behavior and grassland utilization due to underdeveloped infrastructure and poor communication in pastoral areas. Cloud-based grazing management and decision support systems (DSS) are needed to address this issue, promote sustainable grassland use, and preserve their ecosystem services. These systems should enable rapid and large-scale grassland growth and utilization monitoring, providing a basis for decision-making in managing grazing and grassland areas. In this context, this study contributes to the objectives of the EU LIFE IMAGINE project, aiming to develop a Web-GIS app for conserving and monitoring Umbria's grasslands and promoting more informed decisions for more sustainable livestock management. The app, called "Praterie" and developed in Google Earth Engine, utilizes historical Sentinel-2 satellite data and harmonic modeling of the EVI (Enhanced Vegetation Index) to estimate vegetation growth curves and maturity periods for the forthcoming vegetation cycle. The app is updated in quasi-real time and enables users to visualize estimates for the upcoming vegetation cycle, including the maximum greenness, the days remaining to the subsequent maturity period, the accuracy of the harmonic models, and the grassland greenness status in the previous 10 days. Even though additional future developments can improve the informative value of the Praterie app, this platform can contribute to optimizing livestock management and biodiversity conservation by providing timely and accurate data about grassland status and growth curves.}, } @article {pmid38339545, year = {2024}, author = {Gragnaniello, M and Borghese, A and Marrazzo, VR and Maresca, L and Breglio, G and Irace, A and Riccio, M}, title = {Real-Time Myocardial Infarction Detection Approaches with a Microcontroller-Based Edge-AI Device.}, journal = {Sensors (Basel, Switzerland)}, volume = {24}, number = {3}, pages = {}, pmid = {38339545}, issn = {1424-8220}, support = {PNC0000007//Italian Ministry for Universities and Research (MUR)/ ; }, mesh = {Humans ; *Myocardial Infarction/diagnosis ; Heart ; *Heart Diseases ; Myocardium ; Algorithms ; }, abstract = {Myocardial Infarction (MI), commonly known as heart attack, is a cardiac condition characterized by damage to a portion of the heart, specifically the myocardium, due to the disruption of blood flow. Given its recurring and often asymptomatic nature, there is a need for continuous monitoring using wearable devices. This paper proposes a single-microcontroller-based system designed for the automatic detection of MI based on the Edge Computing paradigm. Two solutions for MI detection are evaluated, based on Machine Learning (ML) and Deep Learning (DL) techniques. The developed algorithms are based on two different approaches currently available in the literature, and they are optimized for deployment on low-resource hardware. A feasibility assessment of their implementation on a single 32-bit microcontroller with an ARM Cortex-M4 core was performed, and a comparison in terms of accuracy, inference time, and memory usage was detailed. The ML approach involves significant data processing for feature extraction, coupled with a simpler Neural Network (NN). On the other hand, the second method, based on DL, employs a Spectrogram Analysis for feature extraction and a Convolutional Neural Network (CNN) with a longer inference time and higher memory utilization.
Both methods employ the same low-power hardware, reaching accuracies of 89.40% and 94.76%, respectively. The final prototype is an energy-efficient system capable of real-time detection of MI without the need to connect to remote servers or the cloud. All processing is performed at the edge, enabling NN inference on the same microcontroller.}, } @article {pmid38332408, year = {2024}, author = {Huang, Z and Herbozo Contreras, LF and Yu, L and Truong, ND and Nikpour, A and Kavehei, O}, title = {S4D-ECG: A Shallow State-of-the-Art Model for Cardiac Abnormality Classification.}, journal = {Cardiovascular engineering and technology}, volume = {15}, number = {3}, pages = {305-316}, pmid = {38332408}, issn = {1869-4098}, mesh = {Humans ; *Electrocardiography ; *Signal Processing, Computer-Assisted ; *Algorithms ; *Predictive Value of Tests ; Heart Rate ; Reproducibility of Results ; Time Factors ; Models, Cardiovascular ; Arrhythmias, Cardiac/physiopathology/diagnosis/classification ; Action Potentials ; Diagnosis, Computer-Assisted ; }, abstract = {PURPOSE: This study introduces an algorithm specifically designed for processing raw 12-lead electrocardiogram (ECG) data, with the primary aim of detecting cardiac abnormalities.

METHODS: The proposed model integrates the Diagonal State Space Sequence (S4D) model into its architecture, leveraging its effectiveness in capturing dynamics within time-series data. The S4D model is designed with stacked S4D layers for processing raw input data and a simplified decoder using a dense layer for predicting abnormality types. Experimental optimization determines the optimal number of S4D layers, striking a balance between computational efficiency and predictive performance. This comprehensive approach ensures the model's suitability for real-time processing on hardware devices with limited capabilities, offering a streamlined yet effective solution for heart monitoring.
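
As a rough illustration of what a diagonal state-space layer computes, the NumPy sketch below runs the recurrence h_t = A h_{t-1} + B u_t, y_t = Re(C h_t) with a diagonal complex A over a synthetic trace. This is a toy under stated assumptions, not the authors' trained S4D stack, and all parameter values are arbitrary.

    import numpy as np

    rng = np.random.default_rng(0)
    N = 16                                              # state size
    A = np.exp(-0.1 + 1j * rng.uniform(0, np.pi, N))    # stable diagonal dynamics (|A| < 1)
    B = rng.standard_normal(N) + 0j
    C = rng.standard_normal(N) + 0j

    def diagonal_ssm_layer(u):
        """Apply the diagonal state-space recurrence to a 1-D input sequence u."""
        h = np.zeros(N, dtype=complex)
        y = np.empty(len(u), dtype=float)
        for t, u_t in enumerate(u):
            h = A * h + B * u_t        # elementwise update: A is diagonal
            y[t] = (C @ h).real
        return y

    ecg_like = np.sin(np.linspace(0, 8 * np.pi, 500))   # stand-in for a lead II trace
    features = diagonal_ssm_layer(ecg_like)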

RESULTS: Among the notable features of this algorithm is its strong resilience to noise, enabling the algorithm to achieve an average F1-score of 81.2% and an AUROC of 95.5% in generalization. The model underwent testing specifically on the lead II ECG signal, exhibiting consistent performance with an F1-score of 79.5% and an AUROC of 95.7%.
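
For reference, the two reported metrics can be computed with scikit-learn as below; the labels and scores here are synthetic stand-ins, and the paper's multi-label setting averages such scores across abnormality classes rather than using this binary form directly.

    from sklearn.metrics import f1_score, roc_auc_score

    y_true = [1, 0, 1, 1, 0, 0, 1, 0]                    # synthetic ground truth
    y_score = [0.9, 0.2, 0.7, 0.6, 0.4, 0.1, 0.8, 0.3]   # synthetic model probabilities
    y_pred = [int(s >= 0.5) for s in y_score]            # threshold at 0.5

    print(f"F1 = {f1_score(y_true, y_pred):.3f}, AUROC = {roc_auc_score(y_true, y_score):.3f}")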

CONCLUSION: It is characterized by the elimination of pre-processing features and the availability of a low-complexity architecture that makes it easy to implement on numerous computing devices. Consequently, this algorithm exhibits considerable potential for practical applications in analyzing real-world ECG data. This model can be placed on the cloud for diagnosis. The model was also tested on lead II of the ECG alone and has demonstrated promising results, supporting its potential for on-device application.}, } @article {pmid38327871, year = {2024}, author = {Schönherr, S and Schachtl-Riess, JF and Di Maio, S and Filosi, M and Mark, M and Lamina, C and Fuchsberger, C and Kronenberg, F and Forer, L}, title = {Performing highly parallelized and reproducible GWAS analysis on biobank-scale data.}, journal = {NAR genomics and bioinformatics}, volume = {6}, number = {1}, pages = {lqae015}, pmid = {38327871}, issn = {2631-9268}, abstract = {Genome-wide association studies (GWAS) are transforming genetic research and enable the detection of novel genotype-phenotype relationships. In the last two decades, over 60 000 genetic associations across thousands of traits have been discovered using a GWAS approach. Due to increasing sample sizes, researchers are increasingly faced with computational challenges. A reproducible, modular and extensible pipeline with a focus on parallelization is essential to simplify data analysis and to allow researchers to devote their time to other essential tasks. Here we present nf-gwas, a Nextflow pipeline to run biobank-scale GWAS analysis. The pipeline automatically performs numerous pre- and post-processing steps, integrates regression modeling from the REGENIE package and supports single-variant, gene-based and interaction testing. It includes an extensive reporting functionality that allows users to inspect thousands of phenotypes and navigate interactive Manhattan plots directly in the web browser. The pipeline is tested using the unit-style testing framework nf-test, a crucial requirement in clinical and pharmaceutical settings. Furthermore, we validated the pipeline against published GWAS datasets and benchmarked the pipeline on high-performance computing and cloud infrastructures to provide cost estimations to end users.
nf-gwas is a highly parallelized, scalable and well-tested Nextflow pipeline to perform GWAS analysis in a reproducible manner.}, } @article {pmid38324613, year = {2024}, author = {Swetnam, TL and Antin, PB and Bartelme, R and Bucksch, A and Camhy, D and Chism, G and Choi, I and Cooksey, AM and Cosi, M and Cowen, C and Culshaw-Maurer, M and Davey, R and Davey, S and Devisetty, U and Edgin, T and Edmonds, A and Fedorov, D and Frady, J and Fonner, J and Gillan, JK and Hossain, I and Joyce, B and Lang, K and Lee, T and Littin, S and McEwen, I and Merchant, N and Micklos, D and Nelson, A and Ramsey, A and Roberts, S and Sarando, P and Skidmore, E and Song, J and Sprinkle, MM and Srinivasan, S and Stanzione, D and Strootman, JD and Stryeck, S and Tuteja, R and Vaughn, M and Wali, M and Wall, M and Walls, R and Wang, L and Wickizer, T and Williams, J and Wregglesworth, J and Lyons, E}, title = {CyVerse: Cyberinfrastructure for open science.}, journal = {PLoS computational biology}, volume = {20}, number = {2}, pages = {e1011270}, pmid = {38324613}, issn = {1553-7358}, mesh = {Humans ; *Artificial Intelligence ; *Software ; Cloud Computing ; Publishing ; }, abstract = {CyVerse, the largest publicly-funded open-source research cyberinfrastructure for life sciences, has played a crucial role in advancing data-driven research since the 2010s. As the technology landscape evolved with the emergence of cloud computing platforms, machine learning and artificial intelligence (AI) applications, CyVerse has enabled access by providing interfaces, Software as a Service (SaaS), and cloud-native Infrastructure as Code (IaC) to leverage new technologies. CyVerse services enable researchers to integrate institutional and private computational resources, custom software, perform analyses, and publish data in accordance with open science principles. Over the past 13 years, CyVerse has registered more than 124,000 verified accounts from 160 countries and was used for over 1,600 peer-reviewed publications. Since 2011, 45,000 students and researchers have been trained to use CyVerse. The platform has been replicated and deployed in three countries outside the US, with additional private deployments on commercial clouds for US government agencies and multinational corporations. In this manuscript, we present a strategic blueprint for creating and managing SaaS cyberinfrastructure and IaC as free and open-source software.}, } @article {pmid38323147, year = {2024}, author = {Lewis, EC and Zhu, S and Oladimeji, AT and Igusa, T and Martin, NM and Poirier, L and Trujillo, AJ and Reznar, MM and Gittelsohn, J}, title = {Design of an innovative digital application to facilitate access to healthy foods in low-income urban settings.}, journal = {mHealth}, volume = {10}, number = {}, pages = {2}, pmid = {38323147}, issn = {2306-9740}, support = {R34 HL145368/HL/NHLBI NIH HHS/United States ; T32 DK062707/DK/NIDDK NIH HHS/United States ; }, abstract = {BACKGROUND: Under-resourced urban minority communities in the United States are characterized by food environments with low access to healthy foods, high food insecurity, and high rates of diet-related chronic disease. In Baltimore, Maryland, low access to healthy food largely results from a distribution gap between small food sources (retailers) and their suppliers. Digital interventions have the potential to address this gap, while keeping costs low.

METHODS: In this paper, we describe the technical (I) front-end design and (II) back-end development process of the Baltimore Urban food Distribution (BUD) application (app). We identify and detail four main phases of the process: (I) information architecture; (II) low and high-fidelity wireframes; (III) prototype; and (IV) back-end components, while considering formative research and a pre-pilot test of a preliminary version of the BUD app.

RESULTS: Our lessons learned provide valuable insight into developing a stable app with a user-friendly experience and interface, and accessible cloud computing services for advanced technical features.

CONCLUSIONS: Next steps will involve a pilot trial of the app in Baltimore, and eventually, other urban and rural settings nationwide. Once iterative feedback is incorporated into the app, all code will be made publicly available via an open source repository to encourage adaptation for desired communities.

TRIAL REGISTRATION: ClinicalTrials.gov NCT05010018.}, } @article {pmid38321247, year = {2024}, author = {Pacios, D and Vázquez-Poletti, JL and Dhuri, DB and Atri, D and Moreno-Vozmediano, R and Lillis, RJ and Schetakis, N and Gómez-Sanz, J and Iorio, AD and Vázquez, L}, title = {A serverless computing architecture for Martian aurora detection with the Emirates Mars Mission.}, journal = {Scientific reports}, volume = {14}, number = {1}, pages = {3029}, pmid = {38321247}, issn = {2045-2322}, support = {101007638//Horizon 2020 Framework Programme/ ; G1502//New York University Abu Dhabi/ ; S1560//Advanced Technology Research Council/ ; }, abstract = {Remote sensing technologies are experiencing a surge in adoption for monitoring Earth's environment, demanding more efficient and scalable methods for image analysis. This paper presents a new approach for the Emirates Mars Mission (Hope probe): a serverless computing architecture designed to analyze images of Martian auroras, a key aspect in understanding the Martian atmosphere. Harnessing the power of OpenCV and machine learning algorithms, our architecture offers image classification, object detection, and segmentation in a swift and cost-effective manner. Leveraging the scalability and elasticity of cloud computing, this innovative system is capable of managing high volumes of image data, adapting to fluctuating workloads. This technology, applied to the study of Martian auroras within the HOPE Mission, not only solves a complex problem but also paves the way for future applications in the broad field of remote sensing.}, } @article {pmid38315519, year = {2024}, author = {Xu, J}, title = {The Current Status and Promotional Strategies for Cloud Migration of Hospital Information Systems in China: Strengths, Weaknesses, Opportunities, and Threats Analysis.}, journal = {JMIR medical informatics}, volume = {12}, number = {}, pages = {e52080}, pmid = {38315519}, issn = {2291-9694}, abstract = {BACKGROUND: In the 21st century, Chinese hospitals have witnessed innovative medical business models, such as online diagnosis and treatment, cross-regional multidepartment consultation, and real-time sharing of medical test results, that surpass traditional hospital information systems (HISs). The introduction of cloud computing provides an excellent opportunity for hospitals to address these challenges. However, there is currently no comprehensive research assessing the cloud migration of HISs in China. This lack may hinder the widespread adoption and secure implementation of cloud computing in hospitals.

OBJECTIVE: The objective of this study is to comprehensively assess external and internal factors influencing the cloud migration of HISs in China and propose promotional strategies.

METHODS: Academic articles from January 1, 2007, to February 21, 2023, on the topic were searched in PubMed and HuiyiMd databases, and relevant documents such as national policy documents, white papers, and survey reports were collected from authoritative sources for analysis. A systematic assessment of factors influencing cloud migration of HISs in China was conducted by combining a Strengths, Weaknesses, Opportunities, and Threats (SWOT) analysis and literature review methods. Then, various promotional strategies based on different combinations of external and internal factors were proposed.

RESULTS: After conducting a thorough search and review, this study included 94 academic articles and 37 relevant documents. The analysis of these documents reveals the increasing application of and research on cloud computing in Chinese hospitals, and that it has expanded to 22 disciplinary domains. However, more than half (n=49, 52%) of the documents primarily focused on task-specific cloud-based systems in hospitals, while only 22% (n=21 articles) discussed integrated cloud platforms shared across the entire hospital, medical alliance, or region. The SWOT analysis showed that cloud computing adoption in Chinese hospitals benefits from policy support, capital investment, and social demand for new technology. However, it also faces threats like loss of digital sovereignty, supplier competition, cyber risks, and insufficient supervision. Factors driving cloud migration for HISs include medical big data analytics and use, interdisciplinary collaboration, health-centered medical service provision, and successful cases. Barriers include system complexity, security threats, lack of strategic planning and resource allocation, relevant personnel shortages, and inadequate investment. This study proposes 4 promotional strategies: encouraging more hospitals to migrate, enhancing hospitals' capabilities for migration, establishing a provincial-level unified medical hybrid multi-cloud platform, and strengthening legal frameworks and providing robust technical support.

CONCLUSIONS: Cloud computing is an innovative technology that has gained significant attention from both the Chinese government and the global community. In order to effectively support the rapid growth of a novel, health-centered medical industry, it is imperative for Chinese health authorities and hospitals to seize this opportunity by implementing comprehensive strategies aimed at encouraging hospitals to migrate their HISs to the cloud.}, } @article {pmid38312948, year = {2024}, author = {Ssekagiri, A and Jjingo, D and Bbosa, N and Bugembe, DL and Kateete, DP and Jordan, IK and Kaleebu, P and Ssemwanga, D}, title = {HIVseqDB: a portable resource for NGS and sample metadata integration for HIV-1 drug resistance analysis.}, journal = {Bioinformatics advances}, volume = {4}, number = {1}, pages = {vbae008}, pmid = {38312948}, issn = {2635-0041}, support = {MC_UU_00027/5/MRC_/Medical Research Council/United Kingdom ; MC_UU_00033/1/MRC_/Medical Research Council/United Kingdom ; }, abstract = {SUMMARY: Human immunodeficiency virus (HIV) remains a public health threat, with drug resistance being a major concern in HIV treatment. Next-generation sequencing (NGS) is a powerful tool for identifying low-abundance drug resistance mutations (LA-DRMs) that conventional Sanger sequencing cannot reliably detect. To fully understand the significance of LA-DRMs, it is necessary to integrate NGS data with clinical and demographic data. However, freely available tools for NGS-based HIV-1 drug resistance analysis do not integrate these data. This poses a challenge in interpretation of the impact of LA-DRMs, mainly for resource-limited settings due to the shortage of bioinformatics expertise. To address this challenge, we present HIVseqDB, a portable, secure, and user-friendly resource for integrating NGS data with associated clinical and demographic data for analysis of HIV drug resistance. HIVseqDB currently supports uploading of NGS data and associated sample data, HIV-1 drug resistance data analysis, browsing of uploaded data, and browsing and visualizing of analysis results. Each function of HIVseqDB corresponds to an individual Django application. This ensures efficient incorporation of additional features with minimal effort. HIVseqDB can be deployed on various computing environments, such as on-premises high-performance computing facilities and cloud-based platforms.
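
Since each HIVseqDB function corresponds to an individual Django application, a minimal sketch of the kind of sample-to-sequencing linkage such an app might manage is shown below; the field names are hypothetical, and the project's actual schema lives in its repository. A file like this would sit in a Django app's models.py.

    from django.db import models

    class Sample(models.Model):
        """Clinical/demographic metadata for one sequenced sample (illustrative)."""
        sample_id = models.CharField(max_length=64, unique=True)
        collection_date = models.DateField()
        viral_load = models.FloatField(null=True, blank=True)

    class SequencingRun(models.Model):
        """One NGS run tied back to its sample, with an analysis result payload."""
        sample = models.ForeignKey(Sample, on_delete=models.CASCADE,
                                   related_name="runs")
        fastq_file = models.FileField(upload_to="ngs/")
        drug_resistance_report = models.JSONField(null=True, blank=True)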

HIVseqDB is available at https://github.com/AlfredUg/HIVseqDB. A deployed instance of HIVseqDB is available at https://hivseqdb.org.}, } @article {pmid38308984, year = {2024}, author = {Lan, L and Wang, YG and Chen, HS and Gao, XR and Wang, XK and Yan, XF}, title = {Improving on mapping long-term surface water with a novel framework based on the Landsat imagery series.}, journal = {Journal of environmental management}, volume = {353}, number = {}, pages = {120202}, doi = {10.1016/j.jenvman.2024.120202}, pmid = {38308984}, issn = {1095-8630}, mesh = {*Water ; *Environmental Monitoring/methods ; Satellite Imagery ; Environment ; Algorithms ; }, abstract = {Surface water plays a crucial role in the ecological environment and societal development. Remote sensing detection serves as a significant approach to understand the temporal and spatial change in surface water series (SWS) and to directly construct long-term SWS. Limited by various factors such as cloud, cloud shadow, and problematic satellite sensor monitoring, existing surface water mapping datasets might be short and incomplete due to the loss of raw information on certain dates. Improved algorithms are desired to increase the completeness and quality of SWS datasets. The present study proposes an automated framework to detect SWS, based on the Google Earth Engine and Landsat satellite imagery. This framework implements a raw image filtering algorithm to increase the number of available images, thereby expanding completeness. It improves OTSU thresholding by replacing anomaly thresholds with the median value, thus enhancing the accuracy of SWS datasets. Gaps caused by Landsat7 ETM + SLC-off are repaired with the random forest algorithm and morphological operations. The results show that this novel framework effectively expands the long-term series of SWS for three surface water bodies with distinct geomorphological patterns. The evaluation of confusion matrices suggests the good performance of extracting surface water, with the overall accuracy ranging from 0.96 to 0.97, user's accuracy between 0.96 and 0.98, producer's accuracy ranging from 0.83 to 0.89, and Matthews correlation coefficient ranging from 0.87 to 0.9 for several spectral water indices (NDWI, MNDWI, ANNDWI, and AWEI). Compared with the Global Reservoirs Surface Area Dynamics (GRSAD) dataset, our constructed datasets promote greater completeness of SWS datasets by 27.01%-91.89% for the selected water bodies. The proposed framework for detecting SWS shows good potential in enlarging and completing long-term global-scale SWS datasets, capable of supporting assessments of surface-water-related environmental management and disaster prevention.}, } @article {pmid38303478, year = {2024}, author = {Lv, W and Chen, J and Cheng, S and Qiu, X and Li, D}, title = {QoS-driven resource allocation in fog radio access network: A VR service perspective.}, journal = {Mathematical biosciences and engineering : MBE}, volume = {21}, number = {1}, pages = {1573-1589}, doi = {10.3934/mbe.2024068}, pmid = {38303478}, issn = {1551-0018}, abstract = {While immersive media services represented by virtual reality (VR) are booming, they are facing fundamental challenges, i.e., soaring multimedia applications, large operation costs and scarce spectrum resources. It is difficult to simultaneously address these service challenges in a conventional radio access network (RAN) system.
These problems motivated us to explore a quality-of-service (QoS)-driven resource allocation framework from a VR service perspective based on the fog radio access network (F-RAN) architecture. We elaborated the deployment details of caching allocation, dynamic base station (BS) clustering, statistical beamforming and cost strategy under the QoS constraints in the F-RAN architecture. The key solutions aimed to break through the bottleneck of the network design and to deeply integrate the network-computing resources across cloud, network, edge and terminal through collaboration and integration. Accordingly, we provided a tailored algorithm to solve the corresponding formulation problem. This is the first design of VR services based on caching and statistical beamforming under the F-RAN. A case study is provided to demonstrate the advantage of our proposed framework compared with existing schemes. Finally, we concluded the article and discussed possible open research problems.}, } @article {pmid38303438, year = {2024}, author = {Niu, Q and Li, H and Liu, Y and Qin, Z and Zhang, LB and Chen, J and Lyu, Z}, title = {Toward the Internet of Medical Things: Architecture, trends and challenges.}, journal = {Mathematical biosciences and engineering : MBE}, volume = {21}, number = {1}, pages = {650-678}, doi = {10.3934/mbe.2024028}, pmid = {38303438}, issn = {1551-0018}, mesh = {*Artificial Intelligence ; Big Data ; Cloud Computing ; Internet ; *Internet of Things ; }, abstract = {In recent years, the growing pervasiveness of wearable technology has created new opportunities for medical and emergency rescue operations to protect users' health and safety, such as cost-effective medical solutions, more convenient healthcare and quick hospital treatments, which make it easier for the Internet of Medical Things (IoMT) to evolve. The study first presents an overview of the IoMT before introducing the IoMT architecture. Later, it provides an overview of the core technologies of the IoMT, including cloud computing, big data and artificial intelligence, and it elucidates their utilization within the healthcare system. Further, several emerging challenges, such as cost-effectiveness, security, privacy, accuracy and power consumption, are discussed, and potential solutions for these challenges are also suggested.}, } @article {pmid38301786, year = {2024}, author = {Shrestha, N and Kolarik, NE and Brandt, JS}, title = {Mesic vegetation persistence: A new approach for monitoring spatial and temporal changes in water availability in dryland regions using cloud computing and the sentinel and Landsat constellations.}, journal = {The Science of the total environment}, volume = {917}, number = {}, pages = {170491}, doi = {10.1016/j.scitotenv.2024.170491}, pmid = {38301786}, issn = {1879-1026}, abstract = {Climate change and anthropogenic activity pose severe threats to water availability in drylands. A better understanding of water availability response to these threats could improve our ability to adapt and mitigate climate and anthropogenic effects. Here, we present a Mesic Vegetation Persistence (MVP) workflow that takes every usable image in the Sentinel (10-m) and Landsat (30-m) archives to generate a dense time series of water availability that is continuously updated as new images become available in Google Earth Engine. MVP takes advantage of the fact that mesic vegetation can be used as a proxy of available water in drylands.
Our MVP workflow combines a novel moisture-based index (moisture change index - MCI) with a vegetation index (Modified Chlorophyll Absorption Ratio Vegetation Index (MCARI2)). MCI is the difference in soil moisture condition between an individual pixel's state and the dry and wet reference reflectance in the image, derived using the 5th and 95th percentiles of the visible and shortwave infra-red drought index (VSDI). We produced and validated our MVP products across drylands of the western U.S., covering a broad range of elevation, land use, and ecoregions. MVP outperforms NDVI, a commonly employed index for mesic ecosystem health, in both rangeland and forested ecosystems, and in mesic habitats with particularly high and low vegetation cover. We applied our MVP product at case study sites and found that MVP more accurately characterizes differences in mesic persistence, late-season water availability, and restoration success compared to NDVI. MVP could be applied as an indicator of change in a variety of contexts to provide a greater understanding of how water availability changes as a result of climate and management. Our MVP product for the western U.S. is freely available within a Google Earth Engine Web App, and the MVP workflow is replicable for other dryland regions.}, } @article {pmid38293581, year = {2024}, author = {Zurqani, HA}, title = {The first generation of a regional-scale 1-m forest canopy cover dataset using machine learning and google earth engine cloud computing platform: A case study of Arkansas, USA.}, journal = {Data in brief}, volume = {52}, number = {}, pages = {109986}, pmid = {38293581}, issn = {2352-3409}, abstract = {Forest canopy cover (FCC) is essential in forest assessment and management, affecting ecosystem services such as carbon sequestration, wildlife habitat, and water regulation. Ongoing advancements in techniques for accurately and efficiently mapping and extracting FCC information require a thorough evaluation of their validity and reliability. The primary objectives of this study are to: (1) create a large-scale forest FCC dataset with a 1-meter spatial resolution, (2) assess the spatial distribution of FCC at a regional scale, and (3) investigate differences in FCC areas among the Global Forest Change (Hansen et al., 2013) and U.S. Forest Service Tree Canopy Cover products at various spatial scales in Arkansas (i.e., county and city levels). This study utilized high-resolution aerial imagery and a machine learning algorithm processed and analyzed using the Google Earth Engine cloud computing platform to produce the FCC dataset. The accuracy of this dataset was validated using one-third of the reference locations obtained from the Global Forest Change (Hansen et al., 2013) dataset and the National Agriculture Imagery Program (NAIP) aerial imagery with a 0.6-m spatial resolution. The results showed that the dataset successfully identified FCC at a 1-m resolution in the study area, with overall accuracy ranging between 83.31% and 94.35% per county. Spatial comparison results between the produced FCC dataset and the Hansen et al., 2013 and USFS products indicated a strong positive correlation, with R[2] values ranging between 0.94 and 0.98 for county and city levels. This dataset provides valuable information for monitoring, forecasting, and managing forest resources in Arkansas and beyond.
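The per-pixel MCI computation described in the MVP workflow above can be sketched in a few lines of NumPy. Both the VSDI formulation and the percentile normalisation below are assumptions inferred from the abstract; the published definition may differ in detail.

```python
import numpy as np

def mci(blue, red, swir):
    """Sketch of a moisture change index in the spirit of MVP's MCI.

    Assumes the common VSDI formulation and normalises each pixel between
    per-image dry (5th percentile) and wet (95th percentile) references;
    the paper's exact definition may differ.
    """
    vsdi = 1.0 - ((swir - blue) + (red - blue))   # wetter pixels -> higher VSDI
    dry, wet = np.nanpercentile(vsdi, [5, 95])    # scene-wide reference states
    return (vsdi - dry) / (wet - dry)             # ~0 near dry ref, ~1 near wet ref

# toy 2x2 scene of surface reflectances
blue = np.array([[0.05, 0.06], [0.04, 0.05]])
red  = np.array([[0.10, 0.12], [0.08, 0.09]])
swir = np.array([[0.20, 0.30], [0.15, 0.18]])
print(mci(blue, red, swir))
```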
The methodology followed in this study enhances efficiency, cost-effectiveness, and scalability, as it enables the processing of large-scale datasets with high computational demands in a cloud-based environment. It also demonstrates that machine learning and cloud computing technologies can generate high-resolution forest cover datasets, which might be helpful in other regions of the world.}, } @article {pmid38292471, year = {2024}, author = {Li, W and Zhang, Z and Xie, B and He, Y and He, K and Qiu, H and Lu, Z and Jiang, C and Pan, X and He, Y and Hu, W and Liu, W and Que, T and Hu, Y}, title = {HiOmics: A cloud-based one-stop platform for the comprehensive analysis of large-scale omics data.}, journal = {Computational and structural biotechnology journal}, volume = {23}, number = {}, pages = {659-668}, pmid = {38292471}, issn = {2001-0370}, abstract = {Comprehensively analyzing the vast amount of omics data generated by high-throughput sequencing technology is of utmost importance for scientists. In this context, we propose HiOmics, a cloud-based platform equipped with nearly 300 plugins designed for the comprehensive analysis and visualization of omics data. HiOmics utilizes the Element Plus framework to craft a user-friendly interface and harnesses Docker container technology to ensure the reliability and reproducibility of data analysis results. Furthermore, HiOmics employs the Workflow Description Language and Cromwell engine to construct workflows, ensuring the portability of data analysis and simplifying the examination of intricate data. Additionally, HiOmics has developed DataCheck, a tool based on Golang, which verifies and converts data formats. Finally, by leveraging the object storage technology and batch computing capabilities of public cloud platforms, HiOmics enables the storage and processing of large-scale data while maintaining resource independence among users.}, } @article {pmid38289970, year = {2024}, author = {Abbasi, IA and Jan, SU and Alqahtani, AS and Khan, AS and Algarni, F}, title = {A lightweight and robust authentication scheme for the healthcare system using public cloud server.}, journal = {PloS one}, volume = {19}, number = {1}, pages = {e0294429}, pmid = {38289970}, issn = {1932-6203}, mesh = {Humans ; *Confidentiality ; *Telemedicine ; Computer Security ; Delivery of Health Care ; Privacy ; }, abstract = {Cloud computing is vital in various applications, such as healthcare, transportation, governance, and mobile computing. A public cloud server must be secured against all known threats, because even a minor disturbance by an attacker severely threatens the whole system. A public cloud server faces numerous threats; an adversary can easily enter the server to access sensitive information, especially in the healthcare industry, which offers services to patients, researchers, labs, and hospitals in a flexible way with minimal operational costs. It is challenging to build a reliable system and to ensure the privacy and security of a cloud-enabled healthcare system. In this regard, numerous security mechanisms have been proposed in past decades. These protocols either suffer from replay attacks, require three to four round trips, or incur heavy computation, which means that security is not balanced with performance. Thus, this work uses a fuzzy extractor method to propose a robust security method for a cloud-enabled healthcare system based on Elliptic Curve Cryptography (ECC).
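The ECC primitive that such authentication schemes build on can be illustrated with an ECDH key agreement using the `cryptography` package. This is only the building block, not the authors' protocol, which additionally layers a fuzzy extractor and authentication messages on top.

```python
# ECDH key agreement: the ECC building block, not the authors' full scheme.
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.asymmetric import ec
from cryptography.hazmat.primitives.kdf.hkdf import HKDF

patient_key = ec.generate_private_key(ec.SECP256R1())   # patient device
server_key = ec.generate_private_key(ec.SECP256R1())    # public cloud server

# each side combines its private key with the peer's public key
shared_p = patient_key.exchange(ec.ECDH(), server_key.public_key())
shared_s = server_key.exchange(ec.ECDH(), patient_key.public_key())
assert shared_p == shared_s   # both sides derive the same secret

# derive a symmetric session key from the shared secret
session_key = HKDF(algorithm=hashes.SHA256(), length=32,
                   salt=None, info=b"session").derive(shared_p)
print(len(session_key), "byte session key established")
```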
The security of the proposed scheme has been examined formally with BAN logic, the random oracle model (ROM), and ProVerif, and informally through pragmatic illustration and discussion of different attacks. The proposed security mechanism is analyzed in terms of communication and computation costs. Upon comparing the proposed protocol with prior work, it has been demonstrated that our scheme is 33.91% better in communication costs and 35.39% superior to its competitors in computation costs.}, } @article {pmid38289917, year = {2024}, author = {Sun, Y and Du, X and Niu, S and Zhou, S}, title = {A lightweight attribute-based signcryption scheme based on cloud-fog assisted in smart healthcare.}, journal = {PloS one}, volume = {19}, number = {1}, pages = {e0297002}, pmid = {38289917}, issn = {1932-6203}, mesh = {Humans ; *Computer Security ; *Algorithms ; Big Data ; Cloud Computing ; Delivery of Health Care ; }, abstract = {In the environment of big data of the Internet of Things, smart healthcare is developed in combination with cloud computing. However, with the generation of massive data in smart healthcare systems and the need for real-time data processing, traditional cloud computing is no longer suitable for resource-constrained devices in the Internet of Things. In order to address this issue, we combine the advantages of fog computing and propose a cloud-fog assisted attribute-based signcryption for smart healthcare. In the constructed "cloud-fog-terminal" three-layer model, the patient (data owner) first offloads some of the heavy computation burden to fog nodes before signcryption, and the doctor (data user) also outsources some complicated operations to fog nodes before unsigncryption by providing a blinded private key, which greatly reduces the calculation overhead of the patient's and doctor's resource-constrained devices and improves calculation efficiency. Thus it implements a lightweight signcryption algorithm. Security analysis confirms that the proposed scheme achieves indistinguishability under chosen ciphertext attack and existential unforgeability under chosen message attack if the computational bilinear Diffie-Hellman problem and the decisional bilinear Diffie-Hellman problem hold. Furthermore, performance analysis demonstrates that our new scheme has less computational overhead for both doctors and patients, so it offers higher computational efficiency and is well-suited for application scenarios of smart healthcare.}, } @article {pmid38283301, year = {2024}, author = {Amjad, S and Akhtar, A and Ali, M and Afzal, A and Shafiq, B and Vaidya, J and Shamail, S and Rana, O}, title = {Orchestration and Management of Adaptive IoT-centric Distributed Applications.}, journal = {IEEE internet of things journal}, volume = {11}, number = {3}, pages = {3779-3791}, pmid = {38283301}, issn = {2327-4662}, support = {R35 GM134927/GM/NIGMS NIH HHS/United States ; }, abstract = {Current Internet of Things (IoT) devices provide a diverse range of functionalities, ranging from the measurement and dissemination of sensory observations to computation services for real-time data stream processing. In extreme situations such as emergencies, a significant benefit of IoT devices is that they can help gain a more complete situational understanding of the environment. However, this requires the ability to utilize IoT resources while taking into account location, battery life, and other constraints of the underlying edge and IoT devices.
A dynamic approach is proposed for orchestration and management of distributed workflow applications using services available in cloud data centers, deployed on servers, or IoT devices at the network edge. Our proposed approach is specifically designed for knowledge-driven business process workflows that are adaptive, interactive, evolvable and emergent. A comprehensive empirical evaluation shows that the proposed approach is effective and resilient to situational changes.}, } @article {pmid38273718, year = {2024}, author = {Wu, Y and Sanati, O and Uchimiya, M and Krishnamurthy, K and Wedell, J and Hoch, JC and Edison, AS and Delaglio, F}, title = {SAND: Automated Time-Domain Modeling of NMR Spectra Applied to Metabolite Quantification.}, journal = {Analytical chemistry}, volume = {96}, number = {5}, pages = {1843-1851}, pmid = {38273718}, issn = {1520-6882}, support = {P41 GM111135/GM/NIGMS NIH HHS/United States ; }, mesh = {*Algorithms ; Magnetic Resonance Spectroscopy ; *Magnetic Resonance Imaging ; Software ; Metabolomics ; }, abstract = {Developments in untargeted nuclear magnetic resonance (NMR) metabolomics enable the profiling of thousands of biological samples. The exploitation of this rich source of information requires a detailed quantification of spectral features. However, the development of a consistent and automatic workflow has been challenging because of extensive signal overlap. To address this challenge, we introduce the software Spectral Automated NMR Decomposition (SAND). SAND follows on from the previous success of time-domain modeling and automatically quantifies entire spectra without manual interaction. The SAND approach uses hybrid optimization with Markov chain Monte Carlo methods, employing subsampling in both time and frequency domains. In particular, SAND randomly divides the time-domain data into training and validation sets to help avoid overfitting. We demonstrate the accuracy of SAND, which provides a correlation of ∼0.9 with ground truth on cases including highly overlapped simulated data sets, a two-compound mixture, and a urine sample spiked with different amounts of a four-compound mixture. We further demonstrate an automated annotation using correlation networks derived from SAND decomposed peaks, and on average, 74% of peaks for each compound can be recovered in single clusters. SAND is available in NMRbox, the cloud computing environment for NMR software hosted by the Network for Advanced NMR (NAN). Since the SAND method uses time-domain subsampling (i.e., random subset of time-domain points), it has the potential to be extended to a higher dimensionality and nonuniformly sampled data.}, } @article {pmid38270978, year = {2024}, author = {Dral, PO and Ge, F and Hou, YF and Zheng, P and Chen, Y and Barbatti, M and Isayev, O and Wang, C and Xue, BX and Pinheiro, M and Su, Y and Dai, Y and Chen, Y and Zhang, L and Zhang, S and Ullah, A and Zhang, Q and Ou, Y}, title = {MLatom 3: A Platform for Machine Learning-Enhanced Computational Chemistry Simulations and Workflows.}, journal = {Journal of chemical theory and computation}, volume = {20}, number = {3}, pages = {1193-1213}, pmid = {38270978}, issn = {1549-9626}, abstract = {Machine learning (ML) is increasingly becoming a common tool in computational chemistry. At the same time, the rapid development of ML methods requires a flexible software framework for designing custom workflows. 
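The time-domain subsampling that SAND (above) uses to guard against overfitting can be sketched as a random split of FID points into training and validation sets; the array size and split ratio below are illustrative.

```python
import numpy as np

rng = np.random.default_rng(0)
# stand-in for an NMR free induction decay (complex time-domain signal)
fid = rng.normal(size=4096) + 1j * rng.normal(size=4096)

# randomly divide time-domain points into training and validation sets,
# in the spirit of SAND's overfitting guard (80/20 split is illustrative)
idx = rng.permutation(fid.size)
train_idx, val_idx = idx[: int(0.8 * fid.size)], idx[int(0.8 * fid.size):]

def residuals(model_fid):
    """Fit quality on training points; validation points act as a check."""
    return (np.linalg.norm(model_fid[train_idx] - fid[train_idx]),
            np.linalg.norm(model_fid[val_idx] - fid[val_idx]))

print(residuals(np.zeros_like(fid)))   # trivial "empty" model as a baseline
```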
MLatom 3 is a program package designed to leverage the power of ML to enhance typical computational chemistry simulations and to create complex workflows. This open-source package provides plenty of choices to users, who can run simulations via command-line options, input files, or scripts using MLatom as a Python package, both on their computers and on the online XACS cloud computing service at XACScloud.com. Computational chemists can calculate energies and thermochemical properties, optimize geometries, run molecular and quantum dynamics, and simulate (ro)vibrational, one-photon UV/vis absorption, and two-photon absorption spectra with ML, quantum mechanical, and combined models. The users can choose from an extensive library of methods containing pretrained ML models and quantum mechanical approximations such as AIQM1 approaching coupled-cluster accuracy. The developers can build their own models using various ML algorithms. The great flexibility of MLatom is largely due to the extensive use of the interfaces to many state-of-the-art software packages and libraries.}, } @article {pmid38269892, year = {2024}, author = {Renato, A and Luna, D and Benítez, S}, title = {Development of an ASR System for Medical Conversations.}, journal = {Studies in health technology and informatics}, volume = {310}, number = {}, pages = {664-668}, doi = {10.3233/SHTI231048}, pmid = {38269892}, issn = {1879-8365}, mesh = {Humans ; *Communication ; Language ; Speech ; Acoustics ; *Physicians ; }, abstract = {In this work we document the development of an ASR system for the transcription of conversations between patient and doctor, and we point out the critical aspects of the domain. The system was trained on an acoustic corpus of spontaneous speech, combined with a domain language model and a supervised phonetic dictionary. Its performance was compared with two systems: a) NeMo End-to-End Conformers in Spanish and b) Google API ASR (Automatic Speech Recognition) Cloud. The evaluation was carried out on a set of 208 teleconsultations recorded during the year 2020. The WER (Word Error Rate) was evaluated for ASR, and Recall and F1 for recognized medical entities. In conclusion, the developed system performed better, reaching 72.5% accuracy in the domain of teleconsultations and an F1 for entity recognition of 0.80.}, } @article {pmid38257526, year = {2024}, author = {Malik, AW and Bhatti, DS and Park, TJ and Ishtiaq, HU and Ryou, JC and Kim, KI}, title = {Cloud Digital Forensics: Beyond Tools, Techniques, and Challenges.}, journal = {Sensors (Basel, Switzerland)}, volume = {24}, number = {2}, pages = {}, pmid = {38257526}, issn = {1424-8220}, support = {RS-2022-00144000//National Research Foundation of Korea/ ; 2022-0-01200//Information & Communications Technology Planning & Evaluation/ ; }, abstract = {Cloud computing technology is rapidly becoming ubiquitous and indispensable. However, its widespread adoption also exposes organizations and individuals to a broad spectrum of potential threats. Despite the multiple advantages the cloud offers, organizations remain cautious about migrating their data and applications to the cloud due to fears of data breaches and security compromises. In light of these concerns, this study has conducted an in-depth examination of a variety of articles to enhance the comprehension of the challenges related to safeguarding and fortifying data within the cloud environment.
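The WER metric used to evaluate the ASR system above is the word-level Levenshtein distance normalised by reference length; a minimal sketch (the toy sentences are illustrative, not from the study's corpus):

```python
def wer(reference, hypothesis):
    """Word error rate: word-level Levenshtein distance / reference length."""
    ref, hyp = reference.split(), hypothesis.split()
    # dp[i][j]: edits to turn the first i ref words into the first j hyp words
    dp = [[0] * (len(hyp) + 1) for _ in range(len(ref) + 1)]
    for i in range(len(ref) + 1):
        dp[i][0] = i                      # deletions only
    for j in range(len(hyp) + 1):
        dp[0][j] = j                      # insertions only
    for i in range(1, len(ref) + 1):
        for j in range(1, len(hyp) + 1):
            sub = dp[i - 1][j - 1] + (ref[i - 1] != hyp[j - 1])
            dp[i][j] = min(sub, dp[i - 1][j] + 1, dp[i][j - 1] + 1)
    return dp[-1][-1] / len(ref)

# one substituted word out of four -> WER 0.25
print(wer("el paciente refiere dolor", "el paciente refiere calor"))
```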
Furthermore, the research has scrutinized several well-documented data breaches, analyzing the financial consequences they inflicted. Additionally, it examines the distinctions between conventional digital forensics and the forensic procedures specific to cloud computing. As a result of this investigation, the study has concluded by proposing potential opportunities for further research in this critical domain. By doing so, it contributes to our collective understanding of the complex panorama of cloud data protection and security, while acknowledging the evolving nature of technology and the need for ongoing exploration and innovation in this field. This study also helps in understanding the compound annual growth rate (CAGR) of cloud digital forensics, which is found to be quite high at ≈16.53% from 2023 to 2031. Moreover, its market is expected to reach ≈USD 36.9 billion by the year 2031; presently, it is ≈USD 11.21 billion, which shows that there are great opportunities for investment in this area. This study also strategically addresses emerging challenges in cloud digital forensics, providing a comprehensive approach to navigating and overcoming the complexities associated with the evolving landscape of cloud computing.}, } @article {pmid38248999, year = {2024}, author = {Molnár, T and Király, G}, title = {Forest Disturbance Monitoring Using Cloud-Based Sentinel-2 Satellite Imagery and Machine Learning.}, journal = {Journal of imaging}, volume = {10}, number = {1}, pages = {}, pmid = {38248999}, issn = {2313-433X}, support = {TKP2021-NKTA-43//Ministry of Innovation and Technology of Hungary/ ; }, abstract = {Forest damage has become more frequent in Hungary in the last decades, and remote sensing offers a powerful tool for monitoring it rapidly and cost-effectively. A combined approach was developed to utilise high-resolution ESA Sentinel-2 satellite imagery, Google Earth Engine cloud computing, and field-based forest inventory data. Maps and charts were derived from vegetation indices (NDVI and Z∙NDVI) of satellite images to detect forest disturbances in the Hungarian study site for the period of 2017-2020. The NDVI maps were classified to reveal forest disturbances, and the cloud-based method successfully showed drought and frost damage in the oak-dominated Nagyerdő forest of Debrecen. Differences in the reactions to damage between tree species were visible on the index maps; therefore, a random forest machine learning classifier was applied to show the spatial distribution of dominant species. An accuracy assessment was accomplished with confusion matrices that compared classified index maps to field-surveyed data, demonstrating 99.1% producer, 71% user, and 71% total accuracies for forest damage and 81.9% for tree species.
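The producer's, user's, and overall accuracies reported in the forest-disturbance study above all come from a confusion matrix; a minimal sketch with toy counts (not the study's data):

```python
import numpy as np

# rows = classified map, columns = field reference (conventions vary)
cm = np.array([[115, 47],
               [  4,  9]])   # toy counts, not the study's data

overall = np.trace(cm) / cm.sum()
users = np.diag(cm) / cm.sum(axis=1)      # commission side: per mapped class
producers = np.diag(cm) / cm.sum(axis=0)  # omission side: per reference class
print(f"overall={overall:.2f}, user's={users}, producer's={producers}")
```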
Based on the results of this study and the resilience of Google Earth Engine, the presented method has the potential to be extended to monitor all of Hungary in a faster, more accurate way using systematically collected field data, the latest satellite imagery, and artificial intelligence.}, } @article {pmid38248542, year = {2024}, author = {Willingham, TB and Stowell, J and Collier, G and Backus, D}, title = {Leveraging Emerging Technologies to Expand Accessibility and Improve Precision in Rehabilitation and Exercise for People with Disabilities.}, journal = {International journal of environmental research and public health}, volume = {21}, number = {1}, pages = {}, pmid = {38248542}, issn = {1660-4601}, support = {90REGE0011/ACL/ACL HHS/United States ; }, mesh = {Humans ; Artificial Intelligence ; Quality of Life ; *Medicine ; Exercise ; *Persons with Disabilities ; }, abstract = {Physical rehabilitation and exercise training have emerged as promising solutions for improving health, restoring function, and preserving quality of life in populations that face disparate health challenges related to disability. Despite the immense potential for rehabilitation and exercise to help people with disabilities live longer, healthier, and more independent lives, people with disabilities can experience physical, psychosocial, environmental, and economic barriers that limit their ability to participate in rehabilitation, exercise, and other physical activities. Together, these barriers contribute to health inequities in people with disabilities, by disproportionately limiting their ability to participate in health-promoting physical activities, relative to people without disabilities. Therefore, there is a great need for research and innovation focusing on the development of strategies to expand accessibility and promote participation in rehabilitation and exercise programs for people with disabilities. Here, we discuss how cutting-edge technologies related to telecommunications, wearables, virtual and augmented reality, artificial intelligence, and cloud computing are providing new opportunities to improve accessibility in rehabilitation and exercise for people with disabilities. In addition, we highlight new frontiers in digital health technology and emerging lines of scientific research that will shape the future of precision care strategies for people with disabilities.}, } @article {pmid38248199, year = {2024}, author = {Yan, Z and Lin, X and Zhang, X and Xu, J and Qu, H}, title = {Identity-Based Matchmaking Encryption with Equality Test.}, journal = {Entropy (Basel, Switzerland)}, volume = {26}, number = {1}, pages = {}, pmid = {38248199}, issn = {1099-4300}, abstract = {The identity-based encryption with equality test (IBEET) has become a hot research topic in cloud computing as it provides an equality test for ciphertexts generated under different identities while preserving confidentiality. Subsequently, for the sake of the confidentiality and authenticity of the data, the identity-based signcryption with equality test (IBSC-ET) has been put forward. Nevertheless, the existing schemes do not consider the anonymity of the sender and the receiver, which leads to the potential leakage of sensitive personal information. How to ensure confidentiality, authenticity, and anonymity in the IBEET setting remains a significant challenge. In this paper, we put forward the concept of the identity-based matchmaking encryption with equality test (IBME-ET) to address this issue.
We formalized the system model, the definition, and the security models of the IBME-ET, and then put forward a concrete scheme. Furthermore, our scheme was confirmed to be secure and practical by proving its security and evaluating its performance.}, } @article {pmid38247937, year = {2024}, author = {Kim, J and Jang, H and Koh, H}, title = {MiMultiCat: A Unified Cloud Platform for the Analysis of Microbiome Data with Multi-Categorical Responses.}, journal = {Bioengineering (Basel, Switzerland)}, volume = {11}, number = {1}, pages = {}, pmid = {38247937}, issn = {2306-5354}, support = {2021R1C1C1013861//National Research Foundation of Korea/ ; }, abstract = {The field of the human microbiome is rapidly growing due to the recent advances in high-throughput sequencing technologies. Meanwhile, there have also been many new analytic pipelines, methods and/or tools developed for microbiome data preprocessing and analytics. They are usually focused on microbiome data with continuous (e.g., body mass index) or binary responses (e.g., diseased vs. healthy), yet multi-categorical responses that have more than two categories are also common in reality. In this paper, we introduce a new unified cloud platform, named MiMultiCat, for the analysis of microbiome data with multi-categorical responses. The two main distinguishing features of MiMultiCat are as follows: First, MiMultiCat streamlines a long sequence of microbiome data preprocessing and analytic procedures on user-friendly web interfaces; as such, it is easy to use for many people in various disciplines (e.g., biology, medicine, public health). Second, MiMultiCat performs both association testing and prediction modeling extensively. For association testing, MiMultiCat handles both ecological (e.g., alpha and beta diversity) and taxonomical (e.g., phylum, class, order, family, genus, species) contexts through covariate-adjusted or unadjusted analysis. For prediction modeling, MiMultiCat employs the random forest and gradient boosting algorithms that are well suited to microbiome data while providing nice visual interpretations. We demonstrate its use through the reanalysis of gut microbiome data on obesity with body mass index categories. MiMultiCat is freely available on our web server.}, } @article {pmid38235187, year = {2024}, author = {Xun, D and Wang, R and Zhang, X and Wang, Y}, title = {Microsnoop: A generalist tool for microscopy image representation.}, journal = {Innovation (Cambridge (Mass.))}, volume = {5}, number = {1}, pages = {100541}, pmid = {38235187}, issn = {2666-6758}, abstract = {Accurate profiling of microscopy images from small scale to high throughput is an essential procedure in basic and applied biological research. Here, we present Microsnoop, a novel deep learning-based representation tool trained on large-scale microscopy images using masked self-supervised learning. Microsnoop can process various complex and heterogeneous images, and we classified images into three categories: single-cell, full-field, and batch-experiment images. Our benchmark study on 10 high-quality evaluation datasets, containing over 2,230,000 images, demonstrated Microsnoop's robust and state-of-the-art microscopy image representation ability, surpassing existing generalist and even several custom algorithms. Microsnoop can be integrated with other pipelines to perform tasks such as super-resolution histopathology image analysis and multimodal analysis.
Furthermore, Microsnoop can be adapted to various hardware and can be easily deployed on local or cloud computing platforms. We will regularly retrain and reevaluate the model using community-contributed data to consistently improve Microsnoop.}, } @article {pmid38235176, year = {2024}, author = {Putra, IMS and Siahaan, D and Saikhu, A}, title = {SNLI Indo: A recognizing textual entailment dataset in Indonesian derived from the Stanford Natural Language Inference dataset.}, journal = {Data in brief}, volume = {52}, number = {}, pages = {109998}, pmid = {38235176}, issn = {2352-3409}, abstract = {Recognizing textual entailment (RTE) is an essential task in natural language processing (NLP). It is the task of determining the inference relationship between text fragments (premise and hypothesis), of which the inference relationship is either entailment (true), contradiction (false), or neutral (undetermined). The most popular approach for RTE is neural networks, which has resulted in the best RTE models. Neural network approaches, in particular deep learning, are data-driven and, consequently, the quantity and quality of the data significantly influence the performance of these approaches. Therefore, we introduce SNLI Indo, a large-scale RTE dataset in the Indonesian language, which was derived from the Stanford Natural Language Inference (SNLI) corpus by translating the original sentence pairs. SNLI is a large-scale dataset that contains premise-hypothesis pairs that were generated using a crowdsourcing framework. The SNLI dataset comprises a total of 569,027 sentence pairs with the distribution of sentence pairs as follows: 549,365 pairs for training, 9,840 pairs for model validation, and 9,822 pairs for testing. We translated the original sentence pairs of the SNLI dataset from English to Indonesian using the Google Cloud Translation API. The existence of SNLI Indo addresses the resource gap in the field of NLP for the Indonesian language. Even though large datasets are available in other languages, in particular English, the SNLI Indo dataset enables a more optimal development of deep learning models for RTE in the Indonesian language.}, } @article {pmid38235174, year = {2024}, author = {Koulgi, P and Jumani, S}, title = {Dataset of temporal trends of surface water area across India's rivers and basins.}, journal = {Data in brief}, volume = {52}, number = {}, pages = {109991}, pmid = {38235174}, issn = {2352-3409}, abstract = {This dataset [1] quantifies the extent and rate of annual change in surface water area (SWA) across India's rivers and basins over a period of 30 years spanning 1991 to 2020. This data has been derived from the Global Surface Water Explorer, which maps historical terrestrial surface water occurrence globally using the Landsat satellite image archive since 1984, at a spatial resolution of 30 m/pixel and a temporal resolution of once a month. This monthly time series was used to create annual composites of wet-season (October, November, December), dry-season (February, March, April), and permanent (October, November, December, February, March, April) surface water extent, which were then used to estimate annual rates of change. To estimate SWA trends for both river networks and their basins, we conducted our analysis at two spatial scales - (1) cross-sectional reaches (transects) across river networks, and (2) sub-basins within river catchments.
For each reach and sub-basin (henceforth basin), temporal trends in wet-season, dry-season, and permanent SWA were estimated using the non-parametric Sen's slope estimator. For every valid reach and basin, the temporal time series of invalid or missing data was also computed as a fractional area to inform the level of certainty associated with reported SWA trend estimates. In addition to a Zenodo data repository, this data [1] is presented as an interactive web application (https://sites.google.com/view/surface-water-trends-india/; henceforth Website) to allow users to visualize the trends of permanent, wet-season, and dry-season water along with the extent of missing data for individual transects or basins across India. The Website provides a simple user interface to enable users to download seasonal time-series of SWA for any region of interest at the scale of the river network or basin. The Website also provides details about accessing the annual permanent, dry and wet season composites, which are stored as publicly accessible cloud assets on the Google Earth Engine platform. The spatial (basin and reach) and temporal (wet season, dry season, and permanent water scenarios) scales of information provided in this dataset yield a granular understanding of water systems in India. We envision this dataset to serve as a baseline information layer that can be used in combination with other data sources to support regional analysis of hydrologic trends, watershed-based analysis, and conservation planning. Specific applications include, but are not limited to, monitoring and identifying at-risk wetlands, visualizing and measuring changes to surface water extent before and after water infrastructure projects (such as dams and water abstraction projects), mapping drought prone regions, and mapping natural and anthropogenic changes to SWA along river networks. Intended users include, but are not limited to, students, academics, decision-makers, planners, policymakers, activists, and others interested in water-related issues.}, } @article {pmid38231538, year = {2024}, author = {Gheisari, M and Ghaderzadeh, M and Li, H and Taami, T and Fernández-Campusano, C and Sadeghsalehi, H and Afzaal Abbasi, A}, title = {Mobile Apps for COVID-19 Detection and Diagnosis for Future Pandemic Control: Multidimensional Systematic Review.}, journal = {JMIR mHealth and uHealth}, volume = {12}, number = {}, pages = {e44406}, pmid = {38231538}, issn = {2291-5222}, mesh = {Humans ; *COVID-19/diagnosis/epidemiology/prevention & control ; *Mobile Applications ; Pandemics/prevention & control ; }, abstract = {BACKGROUND: In the modern world, mobile apps are essential for human advancement, and pandemic control is no exception. The use of mobile apps and technology for the detection and diagnosis of COVID-19 has been the subject of numerous investigations, although no thorough analysis of mobile apps for COVID-19 pandemic prevention has been conducted, leaving a gap.
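The Sen's slope estimator used for the surface-water trends above is available as `scipy.stats.theilslopes`; a minimal sketch on synthetic annual data (the trend and noise values are illustrative):

```python
import numpy as np
from scipy.stats import theilslopes

years = np.arange(1991, 2021)
# synthetic annual SWA series: weak upward trend plus noise
swa = 120 + 0.8 * (years - 1991) + np.random.default_rng(1).normal(0, 4, years.size)

# Sen's (Theil-Sen) slope: the median of slopes over all pairs of years,
# robust to outliers such as composites degraded by missing data
slope, intercept, lo, hi = theilslopes(swa, years)
print(f"trend: {slope:.2f} area units/year (95% CI {lo:.2f}..{hi:.2f})")
```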

OBJECTIVE: With the intention of helping software companies and clinical researchers, this study provides comprehensive information regarding the different fields in which mobile apps were used to diagnose COVID-19 during the pandemic.

METHODS: In this systematic review, 535 studies were found after searching 5 major research databases (ScienceDirect, Scopus, PubMed, Web of Science, and IEEE). Of these, only 42 (7.9%) studies concerned with diagnosing and detecting COVID-19 were chosen after applying inclusion and exclusion criteria using the PRISMA (Preferred Reporting Items for Systematic Reviews and Meta-Analyses) protocol.

RESULTS: Mobile apps were categorized into 6 areas based on the content of these 42 studies: contact tracing, data gathering, data visualization, artificial intelligence (AI)-based diagnosis, rule- and guideline-based diagnosis, and data transformation. Patients with COVID-19 were identified via mobile apps using a variety of clinical, geographic, demographic, radiological, serological, and laboratory data. Most studies concentrated on using AI methods to identify people who might have COVID-19. Additionally, symptoms, cough sounds, and radiological images were used more frequently compared to other data types. Deep learning techniques, such as convolutional neural networks, performed comparatively better in the processing of health care data than other types of AI techniques, which improved the diagnosis of COVID-19.

CONCLUSIONS: Mobile apps could soon play a significant role as a powerful tool for data collection, epidemic health data analysis, and the early identification of suspected cases. These technologies can work with the Internet of Things, cloud storage, fifth-generation (5G) technology, and cloud computing. Processing pipelines can be moved to mobile device processing cores using new deep learning methods, such as lightweight neural networks. In the event of future pandemics, mobile apps will play a critical role in rapid diagnosis using various image data and clinical symptoms. Consequently, the rapid diagnosis of these diseases can improve the management of their effects and obtain excellent results in treating patients.}, } @article {pmid38228707, year = {2024}, author = {Simaiya, S and Lilhore, UK and Sharma, YK and Rao, KBVB and Maheswara Rao, VVR and Baliyan, A and Bijalwan, A and Alroobaea, R}, title = {A hybrid cloud load balancing and host utilization prediction method using deep learning and optimization techniques.}, journal = {Scientific reports}, volume = {14}, number = {1}, pages = {1337}, pmid = {38228707}, issn = {2045-2322}, abstract = {Virtual machine (VM) integration methods have proven effective for optimized load balancing in cloud data centers. The main challenge with VM integration methods is the trade-off among cost effectiveness, quality of service, performance, optimal resource utilization and avoidance of service level agreement violations. Deep learning methods are widely used in existing research on cloud load balancing. However, capturing noisy, multilayered workload fluctuations remains a problem due to limited resource-level provisioning. The long short-term memory (LSTM) model plays a vital role in the prediction of server load and workload provisioning. This research presents a hybrid model using deep learning with Particle Swarm Intelligence and Genetic Algorithm ("DPSO-GA") for dynamic workload provisioning in cloud computing. The proposed model works in two phases. The first phase utilizes a hybrid PSO-GA approach to address the prediction challenge by combining the benefits of these two methods in fine-tuning the hyperparameters. In the second phase, CNN-LSTM is utilized. Before using the CNN-LSTM approach to forecast the consumption of resources, a hybrid approach, PSO-GA, is used for training it. In the proposed framework, a one-dimensional CNN and LSTM are used to forecast the cloud resource utilization at various subsequent time steps. The LSTM module simulates temporal information that predicts the upcoming VM workload, while a CNN module extracts complicated distinguishing features gathered from VM workload statistics. The proposed model simultaneously integrates utilization across multiple resource types, which helps overcome the load balancing and over-provisioning issues. Comprehensive simulations are carried out utilizing the Google cluster traces benchmark dataset to verify the efficiency of the proposed DPSO-GA technique in enhancing the distribution of resources and load balancing for the cloud.
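The 1D-CNN-plus-LSTM forecaster at the core of DPSO-GA above can be sketched in PyTorch; the window length and layer sizes are illustrative stand-ins for the hyperparameters that PSO-GA would tune.

```python
import torch
import torch.nn as nn

class CnnLstm(nn.Module):
    """1D-CNN front end + LSTM for next-step workload forecasting (sketch)."""
    def __init__(self):
        super().__init__()
        self.conv = nn.Conv1d(1, 32, kernel_size=3)    # local workload patterns
        self.pool = nn.MaxPool1d(2)
        self.lstm = nn.LSTM(32, 64, batch_first=True)  # temporal dependence
        self.head = nn.Linear(64, 1)                   # next-step utilization

    def forward(self, x):                  # x: (batch, 1, window)
        z = self.pool(torch.relu(self.conv(x)))        # (batch, 32, t)
        z = z.transpose(1, 2)                          # (batch, t, 32)
        _, (h, _) = self.lstm(z)                       # final hidden state
        return self.head(h[-1])                        # (batch, 1)

model = CnnLstm()
window = torch.randn(8, 1, 30)             # 8 samples of 30 past readings
print(model(window).shape)                 # torch.Size([8, 1])
```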
The proposed model achieves outstanding results in terms of precision, accuracy, and load allocation.}, } @article {pmid38218894, year = {2024}, author = {Zhao, Y and Sazlina, SG and Rokhani, FZ and Chinna, K and Su, J and Chew, BH}, title = {The expectations and acceptability of a smart nursing home model among Chinese older adults: a mixed methods study.}, journal = {BMC nursing}, volume = {23}, number = {1}, pages = {40}, pmid = {38218894}, issn = {1472-6955}, abstract = {BACKGROUND: Smart nursing homes (SNHs) integrate advanced technologies, including IoT, digital health, big data, AI, and cloud computing, to optimise remote clinical services, monitor abnormal events, enhance decision-making, and support daily activities for older residents, ensuring overall well-being in a safe and cost-effective environment. This study developed and validated a 24-item Expectation and Acceptability of Smart Nursing Homes Questionnaire (EASNH-Q), and examined the levels of expectations and acceptability of SNHs and associated factors among older adults in China.

METHODS: This was an exploratory sequential mixed methods study, where the qualitative case study was conducted in Hainan and Dalian, while the survey was conducted in Xi'an, Nanjing, Shenyang, and Xiamen. The validation of EASNH-Q also included exploratory and confirmatory factor analyses. Multinomial logistic regression analysis was used to estimate the determinants of expectations and acceptability of SNHs.
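The multinomial logistic regression step described in the METHODS above can be sketched with scikit-learn; the feature matrix, labels, and coding below are hypothetical, not the survey data.

```python
import numpy as np
from sklearn.linear_model import LogisticRegression

# hypothetical survey matrix: columns = [self-efficacy, willingness-to-move];
# labels = expectation tertiles (0 = lowest, 2 = highest)
X = np.array([[0, 0], [1, 0], [0, 1], [0, 1], [1, 1],
              [1, 1], [1, 1], [0, 0], [1, 0], [0, 0]])
y = np.array([0, 1, 0, 1, 2, 2, 1, 0, 2, 0])

# the default lbfgs solver fits a multinomial model for the 3 classes
clf = LogisticRegression(max_iter=1000).fit(X, y)
print(clf.predict_proba([[1, 1]]))   # P(tertile) for a tech-confident mover
```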

RESULTS: The newly developed EASNH-Q uses a Likert Scale ranging from 1 (strongly disagree) to 5 (strongly agree), and underwent validation and refinement from 49 items to the final 24 items. The content validity indices for relevance, comprehensibility, and comprehensiveness were all above 0.95. The expectations and acceptability of SNHs exhibited a strong correlation (r = 0.85, p < 0.01), and good test-retest reliability for expectation (0.90) and acceptability (0.81). The highest tertile of expectations (X[2]=28.89, p < 0.001) and acceptability (X[2]=25.64, p < 0.001) towards SNHs were significantly associated with the willingness to relocate to such facilities. Older adults with self-efficacy in applying smart technologies (OR: 28.0) and those expressing a willingness to move to a nursing home (OR: 3.0) were more likely to have the highest tertile of expectations compared to those in the lowest tertile. Similarly, older adults with self-efficacy in applying smart technologies were more likely to be in the highest tertile of acceptability of SNHs (OR: 13.8).

CONCLUSIONS: EASNH-Q demonstrated commendable validity, reliability, and stability. The majority of Chinese older adults have high expectations for and accept SNHs. Self-efficacy in applying smart technologies and willingness to relocate to a nursing home were associated with high expectations and acceptability of SNHs.}, } @article {pmid38218892, year = {2024}, author = {Putzier, M and Khakzad, T and Dreischarf, M and Thun, S and Trautwein, F and Taheri, N}, title = {Implementation of cloud computing in the German healthcare system.}, journal = {NPJ digital medicine}, volume = {7}, number = {1}, pages = {12}, pmid = {38218892}, issn = {2398-6352}, abstract = {With the advent of artificial intelligence and big data projects, the necessity for a transition from analog medicine to modern-day solutions such as cloud computing becomes unavoidable. Even though this need is now common knowledge, the process is not always easy to start. Legislative changes, for example at the level of the European Union, are helping the respective healthcare systems to take the necessary steps. This article provides an overview of how a German university hospital is dealing with European data protection laws on the integration of cloud computing into everyday clinical practice. By describing our model approach, we aim to identify opportunities and possible pitfalls to sustainably influence digitization in Germany.}, } @article {pmid38218746, year = {2024}, author = {Chen, M and Wei, Z and Li, L and Zhang, K}, title = {Edge computing-based proactive control method for industrial product manufacturing quality prediction.}, journal = {Scientific reports}, volume = {14}, number = {1}, pages = {1288}, pmid = {38218746}, issn = {2045-2322}, abstract = {With the emergence of intelligent manufacturing, new-generation information technologies such as big data and artificial intelligence are rapidly integrating with the manufacturing industry. One of the primary applications is to assist manufacturing plants in predicting product quality. Traditional predictive models primarily focus on establishing high-precision classification or regression models, with less emphasis on imbalanced data. This is a specific but common scenario in practical industrial environments concerning quality prediction. A SMOTE-XGBoost active quality prediction and control method based on joint hyperparameter optimization is proposed to address the problem of imbalanced data classification in product quality prediction. In addition, edge computing technology is introduced to address issues in industrial manufacturing, such as the large bandwidth load and resource limitations associated with traditional cloud computing models. Finally, the practicality and effectiveness of the proposed method are validated through a case study of the brake disc production line. Experimental results indicate that the proposed method outperforms other classification methods in brake disc quality prediction.}, } @article {pmid38215330, year = {2024}, author = {Zhao, B and Chen, WN and Wei, FF and Liu, X and Pei, Q and Zhang, J}, title = {PEGA: A Privacy-Preserving Genetic Algorithm for Combinatorial Optimization.}, journal = {IEEE transactions on cybernetics}, volume = {54}, number = {6}, pages = {3638-3651}, doi = {10.1109/TCYB.2023.3346863}, pmid = {38215330}, issn = {2168-2275}, abstract = {Evolutionary algorithms (EAs), such as the genetic algorithm (GA), offer an elegant way to handle combinatorial optimization problems (COPs).
However, limited by expertise and resources, most users lack the capability to implement EAs for solving COPs. An intuitive and promising solution is to outsource evolutionary operations to a cloud server; however, this poses privacy concerns. To this end, this article proposes a novel computing paradigm called evolutionary computation as a service (ECaaS), where a cloud server renders evolutionary computation services for users while ensuring their privacy. Following the concept of ECaaS, this article presents PEGA, a privacy-preserving GA designed specifically for COPs. PEGA enables users, regardless of their domain expertise or resource availability, to outsource COPs to the cloud server that holds a competitive GA and approximates the optimal solution while safeguarding privacy. Notably, PEGA features the following characteristics. First, PEGA empowers users without domain expertise or sufficient resources to solve COPs effectively. Second, PEGA protects the privacy of users by preventing the leakage of optimization problem details. Third, PEGA performs comparably to the conventional GA when approximating the optimal solution. To realize its functionality, we implement PEGA in a twin-server architecture and evaluate it on two widely known COPs: 1) the traveling salesman problem (TSP) and 2) the 0/1 knapsack problem (KP). Particularly, we utilize encryption to protect users' privacy and carefully design a suite of secure computing protocols to support evolutionary operators of GA on encrypted chromosomes. Privacy analysis demonstrates that PEGA successfully preserves the confidentiality of COP contents. Experimental evaluation results on several TSP datasets and KP datasets reveal that PEGA performs equivalently to the conventional GA in approximating the optimal solution.}, } @article {pmid38215168, year = {2024}, author = {Sun, X and Sun, W and Wang, Z}, title = {Novel enterprises digital transformation influence empirical study.}, journal = {PloS one}, volume = {19}, number = {1}, pages = {e0296693}, pmid = {38215168}, issn = {1932-6203}, mesh = {China ; *Big Data ; *Cloud Computing ; Commerce ; Empirical Research ; }, abstract = {With the rapid development of technologies such as cloud computing and big data, government departments at various levels in China have successively introduced digital subsidy policies to promote enterprises' digital transformation. However, the effectiveness of these policies and their ability to truly achieve policy objectives have become pressing concerns across society. Against this backdrop, this paper employs a moderated mediation effects model to empirically analyze the incentive effects of financial subsidies on the digital transformation of A-share listed manufacturing companies in the Shanghai and Shenzhen stock markets from 2013 to 2022. The research findings indicate a significant promotion effect of financial subsidies on the digital transformation of manufacturing enterprises, especially demonstrating a notable incentive impact on the digital transformation of large enterprises, non-asset-intensive enterprises, technology-intensive enterprises, and non-labor-intensive enterprises. However, the incentive effect on the digital transformation of small and medium-sized enterprises (SMEs), asset-intensive enterprises, non-technology-intensive enterprises, and labor-intensive enterprises is not significant.
Notably, expanded financial subsidies increase R&D investment within manufacturing enterprises, which in turn indirectly encourages their digital transformation. Additionally, incorporating the degree of marketization suggests that it can moderate both the direct and indirect impacts of financial subsidies on enterprise digital transformation. This study enriches the research on the mechanism of the role of financial subsidies in digital transformation and provides empirical evidence on how market participation influences the effects of financial subsidies, thereby assisting policymakers in comprehensively understanding the impact of financial subsidy policies on different types of enterprises.}, } @article {pmid38215070, year = {2024}, author = {Fan, Y}, title = {Load balance-aware dynamic cloud-edge-end collaborative offloading strategy.}, journal = {PloS one}, volume = {19}, number = {1}, pages = {e0296897}, pmid = {38215070}, issn = {1932-6203}, mesh = {*Awareness ; *Cloud Computing ; }, abstract = {Cloud-edge-end (CEE) computing is a hybrid computing paradigm that converges the principles of edge and cloud computing. In the design of CEE systems, a crucial challenge is to develop efficient offloading strategies to achieve the collaboration of edge and cloud offloading. Although CEE offloading problems have been widely studied under various backgrounds and methodologies, load balance, which is indispensable in CEE systems to ensure the full utilization of edge resources, has not yet been accounted for. To fill this research gap, we are devoted to developing a dynamic load balance-aware CEE offloading strategy. First, we propose a load evolution model to characterize the influences of offloading strategies on the system load dynamics and, on this basis, establish a latency model as a performance metric of different offloading strategies. Then, we formulate an optimal control model to seek the optimal offloading strategy that minimizes the latency. Second, we analyze the feasibility of typical optimal control numerical methods in solving our proposed model, and develop a numerical method based on the framework of a genetic algorithm. Third, through a series of numerical experiments, we verify our proposed method. Results show that our method is effective.}, } @article {pmid38212989, year = {2024}, author = {Peltzer, A and Mohr, C and Stadermann, KB and Zwick, M and Schmid, R}, title = {nf-core/nanostring: a pipeline for reproducible NanoString nCounter analysis.}, journal = {Bioinformatics (Oxford, England)}, volume = {40}, number = {1}, pages = {}, pmid = {38212989}, issn = {1367-4811}, support = {//Boehringer Ingelheim Pharma GmbH & Co/ ; }, mesh = {*Software ; *Language ; Cloud Computing ; Workflow ; Quality Control ; }, abstract = {MOTIVATION: The NanoString™ nCounter® technology platform is a widely used targeted quantification platform for the analysis of gene expression of up to ∼800 genes. Whereas the software tools by the manufacturer can perform the analysis in an interactive and GUI-driven approach, there is no portable and user-friendly workflow available that can be used to perform reproducible analysis of multiple samples simultaneously in a scalable fashion on different computing infrastructures.

RESULTS: Here, we present the nf-core/nanostring open-source pipeline to perform a comprehensive analysis including quality control and additional features such as expression visualization, annotation with additional metadata and input creation for differential gene expression analysis. The workflow features easy installation, comprehensive documentation, open-source code with the possibility for further extensions, strong portability across multiple computing environments and detailed quality metrics reporting covering all parts of the pipeline. nf-core/nanostring has been implemented in the Nextflow workflow language and supports the Docker, Singularity and Podman container technologies as well as Conda environments, enabling easy deployment on any Nextflow-compatible system, including the most widely used cloud computing environments such as Google GCP or Amazon AWS.

The source code, documentation and installation instructions as well as results for continuous tests are freely available at https://github.com/nf-core/nanostring and https://nf-co.re/nanostring.}, } @article {pmid38212192, year = {2024}, author = {Ayeni, KI and Berry, D and Ezekiel, CN and Warth, B}, title = {Enhancing microbiome research in sub-Saharan Africa.}, journal = {Trends in microbiology}, volume = {32}, number = {2}, pages = {111-115}, doi = {10.1016/j.tim.2023.11.003}, pmid = {38212192}, issn = {1878-4380}, mesh = {Humans ; Africa South of the Sahara ; *Microbiota ; }, abstract = {While there are lighthouse examples of microbiome research in sub-Saharan Africa (SSA), a significant proportion of local researchers face several challenges. Here, we highlight prevailing issues limiting microbiome research in SSA and suggest potential technological, societal, and research-based solutions. We emphasize the need for considerable investment in infrastructures, training, and appropriate funding to democratize modern technologies with a view to providing useful data to improve human health.}, } @article {pmid38203138, year = {2024}, author = {An, X and Cai, B and Chai, L}, title = {Research on Over-the-Horizon Perception Distance Division of Optical Fiber Communication Based on Intelligent Roadways.}, journal = {Sensors (Basel, Switzerland)}, volume = {24}, number = {1}, pages = {}, pmid = {38203138}, issn = {1424-8220}, abstract = {With the construction and application of more and more intelligent networking demonstration projects, a large number of advanced roadside digital infrastructures are deployed on both sides of the intelligent road. These devices sense the road situation in real time through algorithms and transmit it to edge computing units and cloud control platforms through high-speed optical fiber transmission networks. This article proposes a cloud edge terminal architecture system based on cloud edge cooperation, as well as a data exchange protocol for cloud control basic platforms. The over-the-horizon scene division and optical fiber network communication model are verified by deploying intelligent roadside devices on the intelligent highway. At the same time, this article uses the optical fiber network communication algorithm and ModelScope large model to model inference on real-time video data. The actual data results show that the StreamYOLO (Stream You Only Look Once) model can use the Streaming Perception method to detect and continuously track target vehicles in real-time videos. Finally, the method proposed in this article was experimentally validated in an actual smart highway digital infrastructure construction project. The experimental results demonstrate the high application value and promotion prospects of the fiber optic network in the division of over the horizon perception distance in intelligent roadways construction.}, } @article {pmid38203103, year = {2023}, author = {Sheik, AT and Maple, C and Epiphaniou, G and Dianati, M}, title = {Securing Cloud-Assisted Connected and Autonomous Vehicles: An In-Depth Threat Analysis and Risk Assessment.}, journal = {Sensors (Basel, Switzerland)}, volume = {24}, number = {1}, pages = {}, pmid = {38203103}, issn = {1424-8220}, abstract = {As threat vectors and adversarial capabilities evolve, Cloud-Assisted Connected and Autonomous Vehicles (CCAVs) are becoming more vulnerable to cyberattacks. 
Several established threat analysis and risk assessment (TARA) methodologies are publicly available to address the evolving threat landscape. However, these methodologies inadequately capture the threat data of CCAVs, resulting in poorly defined threat boundaries or the reduced efficacy of the TARA. This is due to multiple factors, including complex hardware-software interactions, rapid technological advancements, outdated security frameworks, heterogeneous standards and protocols, and human errors in CCAV systems. To address these factors, this study begins by systematically evaluating TARA methods and applying the Spoofing, Tampering, Repudiation, Information disclosure, Denial of service, and Elevation of privileges (STRIDE) threat model and Damage, Reproducibility, Exploitability, Affected Users, and Discoverability (DREAD) risk assessment to target system architectures. This study identifies vulnerabilities, quantifies risks, and methodically examines defined data processing components. In addition, this study offers an attack tree to delineate attack vectors and provides a novel defense taxonomy against identified risks. This article demonstrates the efficacy of the TARA in systematically capturing compromised security requirements, threats, limits, and associated risks with greater precision. By doing so, we further discuss the challenges in protecting hardware-software assets against multi-staged attacks due to emerging vulnerabilities. As a result, this research informs advanced threat analyses and risk management strategies for enhanced security engineering of cyberphysical CCAV systems.}, } @article {pmid38203078, year = {2023}, author = {Suo, L and Ma, H and Jiao, W and Liu, X}, title = {Job-Deadline-Guarantee-Based Joint Flow Scheduling and Routing Scheme in Data Center Networks.}, journal = {Sensors (Basel, Switzerland)}, volume = {24}, number = {1}, pages = {}, pmid = {38203078}, issn = {1424-8220}, support = {62101415//National Natural Science Foundation of China/ ; }, abstract = {Many emerging Internet of Things (IoT) applications deployed on cloud platforms have strict latency requirements or deadline constraints, and thus meeting the deadlines is crucial to ensure the quality of service for users and the revenue for service providers in these delay-stringent IoT applications. Efficient flow scheduling in data center networks (DCNs) plays a major role in reducing the execution time of jobs and has garnered significant attention in recent years. However, only few studies have attempted to combine job-level flow scheduling and routing to guarantee meeting the deadlines of multi-stage jobs. In this paper, an efficient heuristic joint flow scheduling and routing (JFSR) scheme is proposed. First, targeting maximizing the number of jobs for which the deadlines have been met, we formulate the joint flow scheduling and routing optimization problem for multiple multi-stage jobs. Second, due to its mathematical intractability, this problem is decomposed into two sub-problems: inter-coflow scheduling and intra-coflow scheduling. In the first sub-problem, coflows from different jobs are scheduled according to their relative remaining times; in the second sub-problem, an iterative coflow scheduling and routing (ICSR) algorithm is designed to alternately optimize the routing path and bandwidth allocation for each scheduled coflow. 
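The inter-coflow stage can be read as a deadline-driven priority ordering. The sketch below orders coflows by a relative-remaining-time urgency ratio; the field names and the exact urgency metric are illustrative assumptions, not the authors' implementation.

```python
from dataclasses import dataclass

@dataclass
class Coflow:
    job_id: int
    remaining_bytes: float   # data still to transfer (hypothetical field)
    deadline: float          # absolute deadline of the parent job (seconds)
    now: float = 0.0         # current time

    def relative_remaining_time(self, bandwidth: float) -> float:
        # Time still needed divided by time still available: values near or
        # above 1 mean the coflow risks missing its job deadline.
        time_needed = self.remaining_bytes / bandwidth
        time_left = max(self.deadline - self.now, 1e-9)
        return time_needed / time_left

def schedule_order(coflows, bandwidth):
    # Most urgent coflows (largest ratio) are scheduled first.
    return sorted(coflows,
                  key=lambda c: c.relative_remaining_time(bandwidth),
                  reverse=True)
```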
Finally, simulation results demonstrate that the proposed JFSR scheme can significantly increase the number of jobs for which the deadlines have been met in DCNs.}, } @article {pmid38203015, year = {2023}, author = {Oyucu, S and Polat, O and Türkoğlu, M and Polat, H and Aksöz, A and Ağdaş, MT}, title = {Ensemble Learning Framework for DDoS Detection in SDN-Based SCADA Systems.}, journal = {Sensors (Basel, Switzerland)}, volume = {24}, number = {1}, pages = {}, pmid = {38203015}, issn = {1424-8220}, support = {101084323//European Union's Horizon Europe research and innovation programme/ ; }, abstract = {Supervisory Control and Data Acquisition (SCADA) systems play a crucial role in overseeing and controlling renewable energy sources like solar, wind, hydro, and geothermal resources. Nevertheless, with the expansion of conventional SCADA network infrastructures, there arise significant challenges in managing and scaling due to increased size, complexity, and device diversity. Using Software Defined Networking (SDN) technology in traditional SCADA network infrastructure offers management, scaling and flexibility benefits. However, as the integration of SDN-based SCADA systems with modern technologies such as the Internet of Things, cloud computing, and big data analytics increases, cybersecurity becomes a major concern for these systems. Therefore, cyber-physical energy systems (CPES) should be considered together with all energy systems. One of the most dangerous types of cyber-attacks against SDN-based SCADA systems is Distributed Denial of Service (DDoS) attacks. DDoS attacks disrupt the management of energy resources, causing service interruptions and increasing operational costs. Therefore, the first step to protect against DDoS attacks in SDN-based SCADA systems is to develop an effective intrusion detection system. This paper proposes a Decision Tree-based Ensemble Learning technique to detect DDoS attacks in SDN-based SCADA systems by accurately distinguishing between normal and DDoS attack traffic. For training and testing the ensemble learning models, normal and DDoS attack traffic data are obtained over a specific simulated experimental network topology. Techniques based on feature selection and hyperparameter tuning are used to optimize the performance of the decision tree ensemble models. Experimental results show that feature selection, combination of different decision tree ensemble models, and hyperparameter tuning can lead to a more accurate machine learning model with better performance detecting DDoS attacks against SDN-based SCADA systems.}, } @article {pmid38203012, year = {2023}, author = {Rodríguez-Azar, PI and Mejía-Muñoz, JM and Cruz-Mejía, O and Torres-Escobar, R and López, LVR}, title = {Fog Computing for Control of Cyber-Physical Systems in Industry Using BCI.}, journal = {Sensors (Basel, Switzerland)}, volume = {24}, number = {1}, pages = {}, pmid = {38203012}, issn = {1424-8220}, abstract = {Brain-computer interfaces use signals from the brain, such as EEG, to determine brain states, which in turn can be used to issue commands, for example, to control industrial machinery. While Cloud computing can aid in the creation and operation of industrial multi-user BCI systems, the vast amount of data generated from EEG signals can lead to slow response time and bandwidth problems. Fog computing reduces latency in high-demand computation networks. Hence, this paper introduces a fog computing solution for BCI processing. 
The solution consists of using fog nodes that incorporate machine learning algorithms to convert EEG signals into commands to control a cyber-physical system. The machine learning module uses a deep learning encoder to generate feature images from EEG signals that are subsequently classified into commands by a random forest. The classification scheme was compared across various classifiers, with the random forest achieving the best performance. Additionally, the fog computing approach was compared against a cloud-only approach using a fog computing simulator. The results indicate that the fog computing method resulted in lower latency than the cloud-only approach.}, } @article {pmid38202896, year = {2023}, author = {Feng, YC and Zeng, SY and Liang, TY}, title = {Part2Point: A Part-Oriented Point Cloud Reconstruction Framework.}, journal = {Sensors (Basel, Switzerland)}, volume = {24}, number = {1}, pages = {}, pmid = {38202896}, issn = {1424-8220}, support = {NSTC112-2221-E992-068//National Science and Technology Council in Taiwan/ ; }, abstract = {Three-dimensional object modeling is necessary for developing virtual and augmented reality applications. Traditionally, application engineers must manually use art software to edit object shapes or exploit LIDAR to scan physical objects for constructing 3D models. This is very time-consuming and costly work. Fortunately, GPUs have recently provided a cost-effective solution for massive data computation. With GPU support, many studies have proposed 3D model generators based on different learning architectures, which can automatically convert 2D object pictures into 3D object models with good performance. However, as the demand for model resolution increases, the required computing time and memory space increase as significantly as the parameters of the learning architecture, which seriously degrades the efficiency of 3D model construction and the feasibility of resolution improvement. To resolve this problem, this paper proposes a part-oriented point cloud reconstruction framework called Part2Point. This framework segments the object's parts, reconstructs the point cloud for individual object parts, and combines the part point clouds into the complete object point cloud. Therefore, it can reduce the number of learning network parameters at the same resolution, effectively minimizing the calculation time cost and the required memory space. Moreover, it can improve the resolution of the reconstructed point cloud so that the reconstructed model can present more details of object parts.}, } @article {pmid38200074, year = {2024}, author = {Chen, C and Gong, L and Luo, X and Wang, F}, title = {Research on a new management model of distribution Internet of Things.}, journal = {Scientific reports}, volume = {14}, number = {1}, pages = {995}, pmid = {38200074}, issn = {2045-2322}, support = {2020-KJLH-PH-006//Science and Technology Project of Zhejiang Electric Power Company/ ; }, abstract = {Based on the characteristics of controllable intelligence of the Internet of Things (IoT) and the functional and transmission-delay requirements of the new distribution network, this study proposes a method of combining edge collaborative computing and the distribution network station area, and builds a distribution network management structure model by combining the Packet Transport Network (PTN) structure. The multi-terminal node distribution model of distributed IoT is established.
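Referring back to the fog-BCI entry above, its fog-node classification stage (encoder-derived features classified into commands by a random forest) can be sketched with scikit-learn; the encoder stand-in, array shapes, and command set are illustrative assumptions, not the authors' implementation.

```python
import numpy as np
from sklearn.ensemble import RandomForestClassifier

rng = np.random.default_rng(0)

def encode(eeg_window: np.ndarray) -> np.ndarray:
    # Stand-in for the deep-learning encoder: flatten an EEG window
    # (channels x samples) into a feature vector.
    return eeg_window.reshape(-1)

# Toy data: 200 windows of 8 channels x 64 samples, four hypothetical commands.
X = np.stack([encode(rng.normal(size=(8, 64))) for _ in range(200)])
y = rng.integers(0, 4, size=200)

clf = RandomForestClassifier(n_estimators=100, random_state=0).fit(X, y)
command = clf.predict(X[:1])[0]  # command issued to the cyber-physical system
```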
Finally, a distribution IoT management model is constructed based on the edge multi-node cooperative reasoning algorithm and collaborative computing architecture model. The purpose of this paper is to solve the problem of large reasoning delay caused by heavy computing tasks in distribution cloud servers. The final results show that the model reduces the inference delay of cloud computing when a large number of smart device terminals of distribution IoT are connected to the network.}, } @article {pmid38197934, year = {2024}, author = {Cheong, RCT and Jawad, S and Adams, A and Campion, T and Lim, ZH and Papachristou, N and Unadkat, S and Randhawa, P and Joseph, J and Andrews, P and Taylor, P and Kunz, H}, title = {Enhancing paranasal sinus disease detection with AutoML: efficient AI development and evaluation via magnetic resonance imaging.}, journal = {European archives of oto-rhino-laryngology : official journal of the European Federation of Oto-Rhino-Laryngological Societies (EUFOS) : affiliated with the German Society for Oto-Rhino-Laryngology - Head and Neck Surgery}, volume = {281}, number = {4}, pages = {2153-2158}, pmid = {38197934}, issn = {1434-4726}, mesh = {Humans ; *Artificial Intelligence ; Machine Learning ; Magnetic Resonance Imaging ; Head ; *Paranasal Sinus Diseases/diagnostic imaging ; }, abstract = {PURPOSE: Artificial intelligence (AI) in the form of automated machine learning (AutoML) offers a new potential breakthrough to overcome the barrier of entry for non-technically trained physicians. A Clinical Decision Support System (CDSS) for screening purposes using AutoML could be beneficial to ease the clinical burden in the radiological workflow for paranasal sinus diseases.

METHODS: The main aim of this work was to evaluate model performance in an automated fashion and to assess the feasibility of training the Vertex AI image classification model on the Google Cloud AutoML platform to automatically classify the presence or absence of sinonasal disease. The dataset is an Open Access Series of Imaging Studies (OASIS-3) MRI head dataset consensus-labelled by three specialised head and neck consultant radiologists. A total of 1313 unique non-TSE T2w MRI head sessions were used from the OASIS-3 repository.
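For orientation, a training job of this kind can be sketched with the google-cloud-aiplatform Python SDK; the project, bucket, label CSV, and budget below are hypothetical placeholders, and the exact dataset schema constants should be checked against current Google Cloud documentation.

```python
from google.cloud import aiplatform

# Sketch of an AutoML image classification run on Vertex AI.
# Project, location, bucket, and budget are hypothetical placeholders.
aiplatform.init(project="my-project", location="us-central1")

dataset = aiplatform.ImageDataset.create(
    display_name="sinus-mri",
    gcs_source="gs://my-bucket/labels.csv",  # image URIs + consensus labels
    import_schema_uri=aiplatform.schema.dataset.ioformat.image.single_label_classification,
)

job = aiplatform.AutoMLImageTrainingJob(
    display_name="sinus-clf",
    prediction_type="classification",
)
model = job.run(
    dataset=dataset,
    model_display_name="sinus-clf-model",
    budget_milli_node_hours=8000,  # 8 node-hours of training budget
)
```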

RESULTS: The best-performing image classification model achieved a precision of 0.928, demonstrating the feasibility and high performance of the Vertex AI image classification model in automatically detecting the presence or absence of sinonasal disease on MRI.
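Precision here is the fraction of scans flagged as diseased that truly are, TP / (TP + FP); a quick check with scikit-learn on toy labels (illustrative only, not the study's data):

```python
from sklearn.metrics import precision_score

# Precision = TP / (TP + FP): of all scans the model flags as diseased,
# the fraction that truly are diseased. Toy labels for illustration.
y_true = [1, 1, 0, 1, 0, 0, 1, 0]
y_pred = [1, 1, 0, 1, 1, 0, 1, 0]
print(precision_score(y_true, y_pred))  # 0.8 here; the paper reports 0.928
```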

CONCLUSION: AutoML allows for potential deployment to optimise diagnostic radiology workflows and lay the foundation for further AI research in radiology and otolaryngology. The usage of AutoML could serve as a formal requirement for a feasibility study.}, } @article {pmid38195165, year = {2024}, author = {Chen, J and Yin, D and Wong, HYH and Duan, X and Yu, KHO and Ho, JWK}, title = {Vulture: cloud-enabled scalable mining of microbial reads in public scRNA-seq data.}, journal = {GigaScience}, volume = {13}, number = {}, pages = {}, pmid = {38195165}, issn = {2047-217X}, support = {//Innovation and Technology Commission - Hong Kong/ ; }, mesh = {Humans ; Benchmarking ; *Carcinoma, Hepatocellular/genetics ; DNA Copy Number Variations ; Hepatitis B virus ; *Liver Neoplasms ; Single-Cell Gene Expression Analysis ; }, abstract = {The rapidly growing collection of public single-cell sequencing data has become a valuable resource for molecular, cellular, and microbial discovery. Previous studies mostly overlooked detecting pathogens in human single-cell sequencing data. Moreover, existing bioinformatics tools lack the scalability to deal with big public data. We introduce Vulture, a scalable cloud-based pipeline that performs microbial calling for single-cell RNA sequencing (scRNA-seq) data, enabling meta-analysis of host-microbial studies from the public domain. In our benchmarking experiments, Vulture is 66% to 88% faster than local tools (PathogenTrack and Venus) and 41% faster than the state-of-the-art cloud-based tool Cumulus, while achieving comparable microbial read identification. In terms of cost on cloud computing systems, Vulture also shows a cost reduction of 83% ($12 vs. $70). We applied Vulture to 2 coronavirus disease 2019, 3 hepatocellular carcinoma (HCC), and 2 gastric cancer human patient cohorts with public sequencing reads data from scRNA-seq experiments and discovered cell type-specific enrichment of severe acute respiratory syndrome coronavirus 2, hepatitis B virus (HBV), and Helicobacter pylori-positive cells, respectively. In the HCC analysis, all cohorts showed hepatocyte-only enrichment of HBV, with cell subtype-associated HBV enrichment based on inferred copy number variations. In summary, Vulture presents a scalable and economical framework to mine unknown host-microbial interactions from large-scale public scRNA-seq data. Vulture is available via an open-source license at https://github.com/holab-hku/Vulture.}, } @article {pmid38192752, year = {2024}, author = {Tan, X and Zhao, D and Wang, M and Wang, X and Wang, X and Liu, W and Ghobaei-Arani, M}, title = {A decision-making mechanism for task offloading using learning automata and deep learning in mobile edge networks.}, journal = {Heliyon}, volume = {10}, number = {1}, pages = {e23651}, pmid = {38192752}, issn = {2405-8440}, abstract = {The development of mobile networks has led to the emergence of challenges such as high delays in storage, computing and traffic management. To deal with these challenges, fifth-generation networks emphasize the use of technologies such as mobile cloud computing and mobile edge computing. Mobile Edge Cloud Computing (MECC) is an emerging distributed computing model that provides access to cloud computing services at the edge of the network and near mobile users. By offloading tasks at the edge of the network instead of transferring them to a remote cloud, MECC can realize flexibility and real-time processing.
During computation offloading, the requirements of Internet of Things (IoT) applications may change at different stages, which is ignored in existing works. With this motivation, we propose a task offloading method under dynamic resource requirements during the use of IoT applications, which focuses on the problem of workload fluctuations. The proposed method uses a learning automata-based offload decision-maker to offload requests to the edge layer. An auto-scaling strategy is then developed using a long short-term memory network which can estimate the expected number of future requests. Finally, an Asynchronous Advantage Actor-Critic algorithm, a deep reinforcement learning-based approach, decides whether to scale down or scale up. The effectiveness of the proposed method has been confirmed through extensive experiments using the iFogSim simulator. The numerical results show that the proposed method has better scalability and performance in terms of delay and energy consumption than the existing state-of-the-art methods.}, } @article {pmid38192482, year = {2023}, author = {Alabadi, M and Habbal, A}, title = {Next-generation predictive maintenance: leveraging blockchain and dynamic deep learning in a domain-independent system.}, journal = {PeerJ. Computer science}, volume = {9}, number = {}, pages = {e1712}, pmid = {38192482}, issn = {2376-5992}, abstract = {The fourth industrial revolution, often referred to as Industry 4.0, has revolutionized the manufacturing sector by integrating emerging technologies such as artificial intelligence (AI), machine and deep learning, Industrial Internet of Things (IIoT), cloud computing, cyber physical systems (CPSs) and cognitive computing, throughout the production life cycle. Predictive maintenance (PdM) emerges as a critical component, utilizing data analytics to track machine health and proactively detect machinery failures. Deep learning (DL) is pivotal in this context, offering superior accuracy in prediction through neural networks' data processing capabilities. However, DL adoption in PdM faces challenges, including continuous model updates and domain dependence. Meanwhile, centralized DL models, prevalent in PdM, pose security risks such as central points of failure and unauthorized access. To address these issues, this study presents an innovative decentralized PdM system integrating DL, blockchain, and decentralized storage based on the InterPlanetary File System (IPFS) for accurately predicting Remaining Useful Lifetime (RUL). DL handles predictive tasks, while blockchain secures data orchestration. Decentralized storage safeguards model metadata and training data for dynamic models. The system features two synchronized DL pipelines for time series data, encompassing prediction and training mechanisms. The detailed materials and methods of this research shed light on the system's development and validation processes. Rigorous validation confirms the system's accuracy, performance, and security through an experimental testbed. The results demonstrate the system's dynamic updating and domain independence. The prediction model surpasses state-of-the-art models in terms of the root mean squared error (RMSE) score. Blockchain-based scalability performance was tested based on smart contract gas usage, and the analysis shows efficient performance across varying input and output data scales. A comprehensive CIA analysis highlights the system's robust security features, addressing confidentiality, integrity, and availability aspects.
The proposed decentralized predictive maintenance (PdM) system, which incorporates deep learning (DL), blockchain technology, and decentralized storage, has the potential to improve predictive accuracy and overcome significant security and scalability obstacles. Consequently, this system holds promising implications for the advancement of predictive maintenance in the context of Industry 4.0.}, } @article {pmid38192461, year = {2023}, author = {Xiao, J and Chang, C and Wu, P and Ma, Y}, title = {Attribute identification based IoT fog data security control and forwarding.}, journal = {PeerJ. Computer science}, volume = {9}, number = {}, pages = {e1747}, pmid = {38192461}, issn = {2376-5992}, abstract = {As Internet of Things (IoT) applications continue to proliferate, traditional cloud computing is increasingly unable to meet the low-latency demands of these applications. The IoT fog architecture solves this limitation by introducing fog servers in the fog layer that are closer to the IoT devices. However, this architecture lacks authentication mechanisms for information sources, security verification for information transmission, and reasonable allocation of fog nodes. To ensure the secure transmission of end-to-end information in the IoT fog architecture, an attribute identification based security control and forwarding method for IoT fog data (AISCF) is proposed. AISCF applies attribute signatures to the IoT fog architecture and uses software defined network (SDN) to control and forward fog layer data flows. Firstly, IoT devices add attribute identifiers to the data they send based on attribute features. The ingress switch then performs fine-grained access control on the data based on these attribute identifiers. Secondly, SDN uses attribute features as flow table matching items to achieve fine-grained control and forwarding of fog layer data flows based on attribute identifiers. Lastly, the egress switch dynamically samples data flows and verifies the attribute signatures of the sampled data packets at the controller end. Experimental validation has demonstrated that AISCF can effectively detect attacks such as data tampering and forged matching items. 
Moreover, AISCF imposes minimal overhead on network throughput, CPU utilization and packet forwarding latency, and has practicality in IoT fog architecture.}, } @article {pmid38191935, year = {2024}, author = {Renton, AI and Dao, TT and Johnstone, T and Civier, O and Sullivan, RP and White, DJ and Lyons, P and Slade, BM and Abbott, DF and Amos, TJ and Bollmann, S and Botting, A and Campbell, MEJ and Chang, J and Close, TG and Dörig, M and Eckstein, K and Egan, GF and Evas, S and Flandin, G and Garner, KG and Garrido, MI and Ghosh, SS and Grignard, M and Halchenko, YO and Hannan, AJ and Heinsfeld, AS and Huber, L and Hughes, ME and Kaczmarzyk, JR and Kasper, L and Kuhlmann, L and Lou, K and Mantilla-Ramos, YJ and Mattingley, JB and Meier, ML and Morris, J and Narayanan, A and Pestilli, F and Puce, A and Ribeiro, FL and Rogasch, NC and Rorden, C and Schira, MM and Shaw, TB and Sowman, PF and Spitz, G and Stewart, AW and Ye, X and Zhu, JD and Narayanan, A and Bollmann, S}, title = {Neurodesk: an accessible, flexible and portable data analysis environment for reproducible neuroimaging.}, journal = {Nature methods}, volume = {21}, number = {5}, pages = {804-808}, pmid = {38191935}, issn = {1548-7105}, support = {P41 EB019936/EB/NIBIB NIH HHS/United States ; R01 EB030896/EB/NIBIB NIH HHS/United States ; R01 MH126699/MH/NIMH NIH HHS/United States ; }, mesh = {*Neuroimaging/methods ; *Software ; Humans ; User-Computer Interface ; Reproducibility of Results ; Brain/diagnostic imaging ; }, abstract = {Neuroimaging research requires purpose-built analysis software, which is challenging to install and may produce different results across computing environments. The community-oriented, open-source Neurodesk platform (https://www.neurodesk.org/) harnesses a comprehensive and growing suite of neuroimaging software containers. Neurodesk includes a browser-accessible virtual desktop, command-line interface and computational notebook compatibility, allowing for accessible, flexible, portable and fully reproducible neuroimaging analysis on personal workstations, high-performance computers and the cloud.}, } @article {pmid38187735, year = {2024}, author = {Moctezuma, L and Rivera, LB and van Nouhuijs, F and Orcales, F and Kim, A and Campbell, R and Fuse, M and Pennings, PS}, title = {Using a decision tree to predict the number of COVID cases: a tutorial for beginners.}, journal = {bioRxiv : the preprint server for biology}, volume = {}, number = {}, pages = {}, doi = {10.1101/2023.12.19.572463}, pmid = {38187735}, issn = {2692-8205}, abstract = {This manuscript describes the development of a module that is part of a learning platform named "NIGMS Sandbox for Cloud-based Learning" https://github.com/NIGMS/NIGMS-Sandbox . The overall genesis of the Sandbox is described in the editorial NIGMS Sandbox at the beginning of this Supplement. This module delivers learning materials on machine learning and decision tree concepts in an interactive format that uses appropriate cloud resources for data access and analyses. Machine learning (ML) is an important tool in biomedical research and can lead to improvements in diagnosis, treatment, and prevention of diseases. During the COVID pandemic ML was used for predictions at the patient and community levels. Given its ubiquity, it is important that future doctors, researchers and teachers get acquainted with ML and its contributions to research. Our goal is to make it easier for everyone to learn about machine learning. 
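To give a flavour of such an exercise, here is a toy decision-tree sketch in scikit-learn, in the spirit of the module's goal of predicting COVID case counts; the column names and values are hypothetical placeholders, not the module's actual dataset.

```python
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeRegressor

# Toy exercise: predict COVID case counts from community-level features.
# Columns and values are invented for illustration only.
df = pd.DataFrame({
    "population_density": [100, 2500, 800, 4000, 300, 1500],
    "vaccination_rate":   [0.7, 0.4, 0.6, 0.3, 0.8, 0.5],
    "cases":              [12, 210, 55, 380, 8, 120],
})
X_train, X_test, y_train, y_test = train_test_split(
    df[["population_density", "vaccination_rate"]], df["cases"], random_state=0)

tree = DecisionTreeRegressor(max_depth=3, random_state=0).fit(X_train, y_train)
print(tree.predict(X_test))  # predicted case counts for held-out communities
```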
The learning module we present here is based on a small COVID dataset, videos, annotated code and the use of Google Colab or the Google Cloud Platform (GCP). The benefit of these platforms is that students do not have to set up a programming environment on their computer which saves time and is also an important democratization factor. The module focuses on learning the basics of decision trees by applying them to COVID data. It introduces basic terminology used in supervised machine learning and its relevance to research. Our experience with biology students at San Francisco State University suggests that the material increases interest in ML.}, } @article {pmid38183538, year = {2024}, author = {Indraja, G and Aashi, A and Vema, VK}, title = {Spatial and temporal classification and prediction of LULC in Brahmani and Baitarni basin using integrated cellular automata models.}, journal = {Environmental monitoring and assessment}, volume = {196}, number = {2}, pages = {117}, pmid = {38183538}, issn = {1573-2959}, mesh = {*Cellular Automata ; *Ecosystem ; Environmental Monitoring ; Algorithms ; Agriculture ; }, abstract = {Monitoring the dynamics of land use and land cover (LULC) is imperative in the changing climate and evolving urbanization patterns worldwide. The shifts in land use have a significant impact on the hydrological response of watersheds across the globe. Several studies have applied machine learning (ML) algorithms using historical LULC maps along with elevation data and slope for predicting future LULC projections. However, the influence of other driving factors such as socio-economic and climatological factors has not been thoroughly explored. In the present study, a sensitivity analysis approach was adopted to understand the effect of both physical (elevation, slope, aspect, etc.) and socio-economic factors such as population density, distance to built-up, and distance to road and rail, as well as climatic factors (mean precipitation) on the accuracy of LULC prediction in the Brahmani and Baitarni (BB) basin of Eastern India. Additionally, in the absence of the recent LULC maps of the basin, three ML algorithms, i.e., random forest (RF), classified and regression trees (CART), and support vector machine (SVM) were utilized for LULC classification for the years 2007, 2014, and 2021 on Google earth engine (GEE) cloud computing platform. Among the three algorithms, RF performed best for classifying built-up areas along with all the other classes as compared to CART and SVM. The prediction results revealed that the proximity to built-up and population growth dominates in modeling LULC over physical factors such as elevation and slope. The analysis of historical data revealed an increase of 351% in built-up areas over the past years (2007-2021), with a corresponding decline in forest and water areas by 12% and 36% respectively. While the future predictions highlighted an increase in built-up class ranging from 11 to 38% during the years 2028-2070, the forested areas are anticipated to decline by 4 to 16%. 
The overall findings of the present study suggested that the BB basin, despite being primarily agricultural with a significant forest cover, is undergoing rapid expansion of built-up areas through the encroachment of agricultural and forested lands, which could have far-reaching implications for the region's ecosystem services and sustainability.}, } @article {pmid38179578, year = {2023}, author = {Pelofske, E and Hahn, G and Djidjev, H}, title = {Initial State Encoding via Reverse Quantum Annealing and H-Gain Features.}, journal = {IEEE transactions on quantum engineering}, volume = {4}, number = {}, pages = {}, pmid = {38179578}, issn = {2689-1808}, support = {R01 AI154470/AI/NIAID NIH HHS/United States ; U01 HL089897/HL/NHLBI NIH HHS/United States ; R21 HD095228/HD/NICHD NIH HHS/United States ; P30 ES002109/ES/NIEHS NIH HHS/United States ; U01 HG008685/HG/NHGRI NIH HHS/United States ; P01 HL132825/HL/NHLBI NIH HHS/United States ; U01 HL089856/HL/NHLBI NIH HHS/United States ; P01 HL120839/HL/NHLBI NIH HHS/United States ; }, abstract = {Quantum annealing is a specialized type of quantum computation that aims to use quantum fluctuations in order to obtain global minimum solutions of combinatorial optimization problems. Programmable D-Wave quantum annealers are available as cloud computing resources, which allow users low-level access to quantum annealing control features. In this article, we are interested in improving the quality of the solutions returned by a quantum annealer by encoding an initial state into the annealing process. We explore two D-Wave features that allow one to encode such an initial state: the reverse annealing (RA) and the h-gain (HG) features. RA aims to refine a known solution following an anneal path starting with a classical state representing a good solution, going backward to a point where a transverse field is present, and then finishing the annealing process with a forward anneal. The HG feature allows one to put a time-dependent weighting scheme on linear (h) biases of the Hamiltonian, and we demonstrate that this feature likewise can be used to bias the annealing to start from an initial state. We also consider a hybrid method consisting of a backward phase resembling RA and a forward phase using the HG initial state encoding. Importantly, we investigate the idea of iteratively applying RA and HG to a problem, with the goal of monotonically improving on an initial state that is not optimal. The HG encoding technique is evaluated on a variety of input problems including the edge-weighted maximum cut problem and the vertex-weighted maximum clique problem, demonstrating that the HG technique is a viable alternative to RA for some problems. We also investigate how the iterative procedures perform for both RA and HG initial state encodings on random whole-chip spin glasses with the native hardware connectivity of the D-Wave Chimera and Pegasus chips.}, } @article {pmid38178510, year = {2023}, author = {Xu, X and Lu, Y and Huang, Y and Zhou, X and Ma, R and Xiong, H and Li, M and Wu, Q and Xu, J}, title = {Frequency modulation of terahertz microcavity via strong coupling with plasmonic resonators.}, journal = {Optics express}, volume = {31}, number = {26}, pages = {44375-44384}, doi = {10.1364/OE.510365}, pmid = {38178510}, issn = {1094-4087}, abstract = {Tunable terahertz (THz) microcavities are crucial for compact on-chip THz devices, aiming at future cloud-based computing and artificial-intelligence technologies.
However, the solutions to effectively modulate THz microcavities remain elusive. Strong coupling has been widely demonstrated in many configurations at different ambient conditions to date and may serve as a promising tool to modulate THz microcavities. Here, we schematically design a microcavity-plasmon hybrid system, and propose an effective approach to modulating the resonant frequencies of THz microcavities by the microcavity-resonator strong coupling. In this case, we observed strong-coupling states, where the resultant two-polariton branches exhibit an anti-crossing splitting in the frequency domain, experimentally exhibiting a ∼6.2% frequency modulation to the microcavity compared to the uncoupled case. This work provides an efficient approach to modulating chip-scale THz microcavities, thereby facilitating the development and application of compact THz integrated devices, further empowering the evolution of future information processing and intelligent computing systems.}, } @article {pmid38167901, year = {2024}, author = {DeWitt, PE and Rebull, MA and Bennett, TD}, title = {Open source and reproducible and inexpensive infrastructure for data challenges and education.}, journal = {Scientific data}, volume = {11}, number = {1}, pages = {8}, pmid = {38167901}, issn = {2052-4463}, support = {K23 HD074620/HD/NICHD NIH HHS/United States ; R03 HD094912/HD/NICHD NIH HHS/United States ; }, abstract = {Data sharing is necessary to maximize the actionable knowledge generated from research data. Data challenges can encourage secondary analyses of datasets. Data challenges in biomedicine often rely on advanced cloud-based computing infrastructure and expensive industry partnerships. Examples include challenges that use Google Cloud virtual machines and the Sage Bionetworks Dream Challenges platform. Such robust infrastructures can be financially prohibitive for investigators without substantial resources. Given the potential to develop scientific and clinical knowledge and the NIH emphasis on data sharing and reuse, there is a need for inexpensive and computationally lightweight methods for data sharing and hosting data challenges. To fill that gap, we developed a workflow that allows for reproducible model training, testing, and evaluation. We leveraged public GitHub repositories, open-source computational languages, and Docker technology. In addition, we conducted a data challenge using the infrastructure we developed. In this manuscript, we report on the infrastructure, workflow, and data challenge results. The infrastructure and workflow are likely to be useful for data challenges and education.}, } @article {pmid38166081, year = {2024}, author = {Tian, Z and Qiu, L and Wang, L}, title = {Drivers and influencers of blockchain and cloud-based business sustainability accounting in China: Enhancing practices and promoting adoption.}, journal = {PloS one}, volume = {19}, number = {1}, pages = {e0295802}, pmid = {38166081}, issn = {1932-6203}, abstract = {The field of sustainability accounting aims to integrate environmental, social, and governance factors into financial reporting. With the growing importance of sustainability practices, emerging technologies have the potential to revolutionize reporting methods. However, there is a lack of research on the factors influencing the adoption of blockchain and cloud-based sustainability accounting in China.
This study employs a mixed-methods approach to examine the key drivers and barriers to technology adoption for sustainability reporting among Chinese businesses. Through a systematic literature review, gaps in knowledge were identified. Primary data was collected through an online survey of firms, followed by in-depth case studies. The findings of the study reveal a positive relationship between company size and reporting behaviors. However, size alone is not sufficient to predict outcomes accurately. The industry type also has significant but small effects, although its impact on reporting behaviors varies. The relationship between profitability and reporting behaviors is intricate and contingent, requiring contextual examination. The adoption of blockchain technology is positively associated with capabilities, resources, skills, and regulatory factors. On the other hand, cloud computing adoption is linked to resources, management support, and risk exposures. However, the specific impacts of industry on adoption remain inconclusive. This study aims to offer empirical validation of relationships, shedding light on the intricate nature of interactions that necessitate nuanced conceptualizations incorporating contextual moderators. The findings underscore the importance of providing customized support and adaptable guidance to accommodate the evolving practices in sustainability accounting. Moreover, the assimilation of technology and organizational changes highlights the need for multifaceted stakeholder cooperation to drive responsible innovation and address the challenges posed by digital transformations in this field.}, } @article {pmid38166050, year = {2024}, author = {Alourani, A and Khalid, A and Tahir, M and Sardaraz, M}, title = {Energy efficient virtual machines placement in cloud datacenters using genetic algorithm and adaptive thresholds.}, journal = {PloS one}, volume = {19}, number = {1}, pages = {e0296399}, pmid = {38166050}, issn = {1932-6203}, mesh = {*Conservation of Energy Resources ; *Algorithms ; Cloud Computing ; }, abstract = {Cloud computing platforms provide on-demand IT services to users and have advanced the technology. The purpose of virtualization is to improve the utilization of resources and reduce power consumption. Energy consumption is a major issue faced by data center management. Virtual machine placement is an effective technique used for this purpose. Different algorithms have been proposed for virtual machine placement in cloud environments. These algorithms have considered different parameters, and improving one parameter affects other parameters. There is still a need for solutions that effectively utilize cloud resources and reduce energy consumption without affecting other parameters. In this article, we present an algorithm for Virtual Machines (VMs) placement in cloud computing. The algorithm uses adaptive thresholding to identify overutilized and underutilized hosts to reduce energy consumption and Service Level Agreement (SLA) violations.
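The over/underutilized-host step can be pictured as follows; this sketch uses one plausible adaptive-threshold rule (median absolute deviation over recent CPU readings), which is an assumption for illustration and not necessarily the paper's rule.

```python
import statistics

def classify_hosts(cpu_histories, k=2.5):
    # Derive adaptive thresholds per host from its recent CPU utilisation
    # using the median absolute deviation (MAD). Returns hosts that are
    # candidates for VM migration (overutilized) or consolidation (under).
    over, under = [], []
    for host, util in cpu_histories.items():
        med = statistics.median(util)
        mad = statistics.median(abs(u - med) for u in util) or 1e-9
        upper, lower = med + k * mad, med - k * mad
        current = util[-1]
        if current > min(upper, 0.9):    # hard cap: never tolerate >90% load
            over.append(host)
        elif current < max(lower, 0.1):  # floor: nearly idle hosts
            under.append(host)
    return over, under

over, under = classify_hosts({"h1": [0.4, 0.5, 0.95], "h2": [0.2, 0.1, 0.05]})
print(over, under)  # ['h1'] ['h2']
```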
The algorithm is validated with simulations and comparative results are presented.}, } @article {pmid38161217, year = {2024}, author = {Zhang, X and Dou, Z and Kim, SH and Upadhyay, G and Havert, D and Kang, S and Kazemi, K and Huang, KY and Aydin, O and Huang, R and Rahman, S and Ellis-Mohr, A and Noblet, HA and Lim, KH and Chung, HJ and Gritton, HJ and Saif, MTA and Kong, HJ and Beggs, JM and Gazzola, M}, title = {Mind In Vitro Platforms: Versatile, Scalable, Robust, and Open Solutions to Interfacing with Living Neurons.}, journal = {Advanced science (Weinheim, Baden-Wurttemberg, Germany)}, volume = {11}, number = {11}, pages = {e2306826}, pmid = {38161217}, issn = {2198-3844}, support = {2123781//National Science Foundation/ ; 1830881//National Science Foundation/ ; }, mesh = {Electrodes ; *Brain/physiology ; *Neurons/physiology ; Electric Stimulation ; Electrophysiological Phenomena/physiology ; }, abstract = {Motivated by the unexplored potential of in vitro neural systems for computing and by the corresponding need of versatile, scalable interfaces for multimodal interaction, an accurate, modular, fully customizable, and portable recording/stimulation solution that can be easily fabricated, robustly operated, and broadly disseminated is presented. This approach entails a reconfigurable platform that works across multiple industry standards and that enables a complete signal chain, from neural substrates sampled through micro-electrode arrays (MEAs) to data acquisition, downstream analysis, and cloud storage. Built-in modularity supports the seamless integration of electrical/optical stimulation and fluidic interfaces. Custom MEA fabrication leverages maskless photolithography, favoring the rapid prototyping of a variety of configurations, spatial topologies, and constitutive materials. Through a dedicated analysis and management software suite, the utility and robustness of this system are demonstrated across neural cultures and applications, including embryonic stem cell-derived and primary neurons, organotypic brain slices, 3D engineered tissue mimics, concurrent calcium imaging, and long-term recording. Overall, this technology, termed "mind in vitro" to underscore the computing inspiration, provides an end-to-end solution that can be widely deployed due to its affordable (>10× cost reduction) and open-source nature, catering to the expanding needs of both conventional and unconventional electrophysiology.}, } @article {pmid38155856, year = {2023}, author = {Lai, H and Chen, B and Yin, X and Wang, G and Wang, X and Yun, T and Lan, G and Wu, Z and Yang, C and Kou, W}, title = {Dry season temperature and rainy season precipitation significantly affect the spatio-temporal pattern of rubber plantation phenology in Yunnan province.}, journal = {Frontiers in plant science}, volume = {14}, number = {}, pages = {1283315}, pmid = {38155856}, issn = {1664-462X}, abstract = {The ongoing global warming trajectory poses extensive challenges to plant ecosystems, with rubber plantations particularly vulnerable due to their influence on not only the longevity of the growth cycle and rubber yield, but also the complex interplay of carbon, water, and energy exchanges between the forest canopy and atmosphere. However, the response mechanism of phenology in rubber plantations to climate change remains unclear. This study concentrates on sub-optimal environment rubber plantations in Yunnan province, Southwest China. 
Utilizing the Google Earth Engine (GEE) cloud platform, multi-source remote sensing images were synthesized at 8-day intervals with a spatial resolution of 30 meters. The Normalized Difference Vegetation Index (NDVI) time series was reconstructed using the Savitzky-Golay (S-G) filter, coupled with the application of the seasonal amplitude method to extract three crucial phenological indicators, namely the start of the growing season (SOS), the end of the growing season (EOS), and the length of the growing season (LOS). Linear regression, the Pearson correlation coefficient, and multiple stepwise regression analysis were used to extract phenology trends and find the relationships between SOS, EOS, and climate factors. The findings demonstrated that 1) the phenology of rubber plantations has undergone dynamic changes over the past two decades. Specifically, the SOS advanced by 9.4 days per decade (R[2] = 0.42, p< 0.01), whereas the EOS was delayed by 3.8 days per decade (R[2] = 0.35, p< 0.01). Additionally, the LOS was extended by 13.2 days per decade (R[2] = 0.55, p< 0.01); 2) rubber phenology demonstrated a notable sensitivity to temperature fluctuations during the dry season and precipitation patterns during the rainy season. The SOS advanced 2.0 days (r =-0.19, p< 0.01) and the EOS advanced 2.8 days (r =-0.35, p< 0.01) for every 1°C increase in the cool-dry season. Whereas a 100 mm increase in rainy season precipitation caused the SOS to be delayed by 2.0 days (r = 0.24, p< 0.01), a 100 mm increase in hot-dry season precipitation caused the EOS to be advanced by 7.0 days (r =-0.28, p< 0.01); 3) rubber phenology displayed a legacy effect of preseason climate variations. Changes in temperature during the fourth preseason month and precipitation during the fourth and eleventh preseason months are predominantly responsible for the variation in SOS. Meanwhile, temperature changes during the second, fourth, and ninth preseason months are primarily responsible for the variation in EOS. The study aims to enhance our understanding of how rubber plantations respond to climate change in sub-optimal environments and provide valuable insights for sustainable rubber production management in the face of changing environmental conditions.}, } @article {pmid38151930, year = {2023}, author = {Wang, X and Li, Q and Ma, C and Zhang, S and Lin, Y and Li, J and Liu, C}, title = {[Artificial intelligence in wearable electrocardiogram monitoring].}, journal = {Sheng wu yi xue gong cheng xue za zhi = Journal of biomedical engineering = Shengwu yixue gongchengxue zazhi}, volume = {40}, number = {6}, pages = {1084-1092}, pmid = {38151930}, issn = {1001-5515}, mesh = {Humans ; Artificial Intelligence ; Reproducibility of Results ; Electrocardiography ; *Cardiovascular Diseases ; *Wearable Electronic Devices ; }, abstract = {Electrocardiogram (ECG) monitoring has important clinical value in the diagnosis, prevention and rehabilitation of cardiovascular disease (CVD). With the rapid development of the Internet of Things (IoT), big data, cloud computing, artificial intelligence (AI) and other advanced technologies, wearable ECG is playing an increasingly important role. With the aging of the population, it is increasingly urgent to upgrade the diagnostic mode of CVD. Using AI technology to assist the clinical analysis of long-term ECGs, and thus to improve early detection and prediction of CVD, has become an important direction.
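Returning to the rubber-phenology entry above, its NDVI reconstruction step can be sketched with SciPy's Savitzky-Golay filter; the window length, polynomial order, and amplitude threshold below are illustrative choices, not the study's exact settings.

```python
import numpy as np
from scipy.signal import savgol_filter

# Smooth a noisy 8-day NDVI series (46 composites = one year) with an
# S-G filter; window and polynomial order are illustrative choices.
t = np.arange(46)
rng = np.random.default_rng(0)
ndvi = 0.5 + 0.3 * np.sin(2 * np.pi * t / 46) + rng.normal(0, 0.05, 46)
ndvi_smooth = savgol_filter(ndvi, window_length=9, polyorder=3)

# Seasonal amplitude method, sketched: take SOS as the first composite
# where smoothed NDVI exceeds 20% of its seasonal amplitude.
threshold = ndvi_smooth.min() + 0.2 * (ndvi_smooth.max() - ndvi_smooth.min())
sos_index = int(np.argmax(ndvi_smooth > threshold))
print(sos_index * 8, "days into the year")
```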
Intelligent wearable ECG monitoring requires collaboration between edge and cloud computing. Meanwhile, a clearly defined medical scenario is conducive to the precise implementation of wearable ECG monitoring. This paper first summarized the progress of AI-related ECG studies and the current technical orientation. Then three cases were depicted to illustrate how AI in wearable ECG cooperates with the clinic. Finally, we discussed the two core issues of the reliability and value of AI-related ECG technology, and looked ahead to future opportunities and challenges.}, } @article {pmid38146308, year = {2024}, author = {Singh, S and Hou, F and Wang, R}, title = {Real and synthetic Punjabi speech datasets for automatic speech recognition.}, journal = {Data in brief}, volume = {52}, number = {}, pages = {109865}, doi = {10.1016/j.dib.2023.109865}, pmid = {38146308}, issn = {2352-3409}, abstract = {Automatic speech recognition (ASR) has been an active area of research. Training with large annotated datasets is the key to the development of robust ASR systems. However, most available datasets are focused on high-resource languages like English, leaving a significant gap for low-resource languages. Among these languages is Punjabi: despite its large number of speakers, Punjabi lacks high-quality annotated datasets for accurate speech recognition. To address this gap, we introduce three labeled Punjabi speech datasets: Punjabi Speech (real speech dataset) and Google-synth/CMU-synth (synthesized speech datasets). The Punjabi Speech dataset consists of read speech recordings captured in various environments, including both studio and open settings. In addition, the Google-synth dataset is synthesized using Google's Punjabi text-to-speech cloud services. Furthermore, the CMU-synth dataset is created using the Clustergen model available in the Festival speech synthesis system developed by CMU. These datasets aim to facilitate the development of accurate Punjabi speech recognition systems, bridging the resource gap for this important language.}, } @article {pmid38140780, year = {2024}, author = {Li, B and Du, K and Qu, G and Tang, N}, title = {Big data research in nursing: A bibliometric exploration of themes and publications.}, journal = {Journal of nursing scholarship : an official publication of Sigma Theta Tau International Honor Society of Nursing}, volume = {56}, number = {3}, pages = {466-477}, doi = {10.1111/jnu.12954}, pmid = {38140780}, issn = {1547-5069}, support = {22A320067//the Key Research Project in Higher Education in Henan, China/ ; SBGJ202103076//Medical science and technology public relations project jointly built by Henan Health Commission/ ; HLKY2023002//Nursing research Special Fund of the First Affiliated Hospital of Zhengzhou University/ ; }, mesh = {*Bibliometrics ; Humans ; *Big Data ; *Nursing Research ; Data Mining ; Publications/statistics & numerical data/trends ; }, abstract = {AIMS: To comprehend the current research hotspots and emerging trends in big data research within the global nursing domain.

DESIGN: Bibliometric analysis.

METHODS: The quality articles for analysis, indexed in the Web of Science Core Collection, were obtained from the Web of Science database as of February 10, 2023. Descriptive and visual analysis and text mining were performed with CiteSpace and VOSviewer.
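The text-mining step behind such keyword maps boils down to co-occurrence counting; a toy sketch of the idea follows (invented records, not the study's corpus, and a much-simplified stand-in for what CiteSpace and VOSviewer do).

```python
from collections import Counter
from itertools import combinations

# Count how often pairs of author keywords appear together in the same
# record; the strongest pairs become the links in a VOSviewer-style map.
records = [
    {"machine learning", "big data", "nursing"},
    {"deep learning", "big data", "prediction model"},
    {"iot", "cloud computing", "nursing"},
    {"machine learning", "prediction model", "nursing"},
]
pairs = Counter()
for keywords in records:
    pairs.update(combinations(sorted(keywords), 2))
print(pairs.most_common(3))  # strongest co-occurrence links
```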

RESULTS: The research on big data in the nursing field has experienced steady growth over the past decade. A total of 45 core authors and 17 core journals around the world have contributed to this field. The author-keyword analysis revealed five distinct clusters of research focus. These encompass machine/deep learning and artificial intelligence, natural language processing, big data analytics and data science, IoT and cloud computing, and the development of prediction models through data mining. Furthermore, a comparative examination was conducted with data spanning from 1980 to 2016, and an extended analysis was performed covering the years from 1980 to 2019. This bibliometric mapping comparison allowed for the identification of prevailing research trends and the pinpointing of potential future research hotspots within the field.

CONCLUSIONS: The fusion of data mining and nursing research has steadily advanced and become more refined over time. Technologically, it has expanded from initial natural language processing to encompass machine learning, deep learning, artificial intelligence, and data mining approaches that amalgamate multiple technologies. Professionally, it has progressed from addressing patient safety and pressure ulcers to encompassing chronic diseases, critical care, emergency response, community and nursing home settings, and specific diseases (cardiovascular diseases, diabetes, stroke, etc.). The convergence of IoT, cloud computing, fog computing, and big data processing has opened new avenues for research in geriatric nursing management and community care. However, a global imbalance exists in utilizing big data in nursing research, emphasizing the need to enhance data science literacy among clinical staff worldwide to advance this field.

CLINICAL RELEVANCE: This study focused on the thematic trends and evolution of research on big data in nursing. Moreover, this study may help researchers, journals, and countries around the world understand the field and foster collaborations among them to promote the development of big data in nursing science.}, } @article {pmid38139731, year = {2023}, author = {Yang, X and Fang, H and Gao, Y and Wang, X and Wang, K and Liu, Z}, title = {Computation Offloading and Resource Allocation Based on P-DQN in LEO Satellite Edge Networks.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {24}, pages = {}, pmid = {38139731}, issn = {1424-8220}, support = {2020YFB1808003//National Key Research and Development Program of China/ ; 61801379//National Natural Science Foundation of China/ ; 2020JQ-647//Natural Science Foundation of Shaanxi Province of China/ ; }, abstract = {Traditional low earth orbit (LEO) satellite networks are typically independent of terrestrial networks and develop relatively slowly due to on-board capacity limitations. By integrating emerging mobile edge computing (MEC) with LEO satellite networks to form the business-oriented "end-edge-cloud" multi-level computing architecture, some computing-sensitive tasks can be offloaded by ground terminals to satellites, thereby satisfying more tasks in the network. Making computation offloading and resource allocation decisions in LEO satellite edge networks nevertheless poses challenges in tracking network dynamics and handling sophisticated actions. For the discrete-continuous hybrid action space and time-varying networks, this work aims to use the parameterized deep Q-network (P-DQN) for joint computation offloading and resource allocation. First, the characteristics of time-varying channels are modeled, and then both communication and computation models under three different offloading decisions are constructed. Second, the constraints on task offloading decisions, on remaining available computing resources, and on the power control of LEO satellites as well as the cloud server are formulated, followed by the maximization problem of the satisfied task number over the long run. Third, using the parameterized action Markov decision process (PAMDP) and P-DQN, the joint computation offloading, resource allocation, and power control decisions are made in real time, to accommodate dynamics in LEO satellite edge networks and handle the discrete-continuous hybrid action space. Simulation results show that the proposed P-DQN method can approach the optimal control and outperforms other reinforcement learning (RL) methods designed for merely discrete or merely continuous action spaces, in terms of the long-term rate of satisfied tasks.}, } @article {pmid38139716, year = {2023}, author = {Aldaej, A and Ahanger, TA and Ullah, I}, title = {Deep Learning-Inspired IoT-IDS Mechanism for Edge Computing Environments.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {24}, pages = {}, pmid = {38139716}, issn = {1424-8220}, support = {2022/01/21723//Prince Sattam Bin Abdulaziz University/ ; }, abstract = {The Internet of Things (IoT) technology has seen substantial research in Deep Learning (DL) techniques to detect cyberattacks. Critical Infrastructures (CIs) must be able to quickly detect cyberattacks close to edge devices in order to prevent service interruptions.
DL approaches outperform shallow machine learning techniques in attack detection, making them a viable alternative for use in intrusion detection. However, because of the massive amount of IoT data and the computational requirements for DL models, transmission overheads prevent the successful implementation of DL models closer to the devices. Because they were not trained on pertinent IoT data, current Intrusion Detection Systems (IDSs) either use conventional techniques or are not intended for distributed edge-cloud deployment. A new edge-cloud-based IoT IDS is suggested to address these issues. It uses distributed processing to separate the dataset into subsets appropriate to different attack classes and performs attribute selection on time-series IoT data. Next, DL is used to train an attack detection network, which consists of a Recurrent Neural Network (RNN) and a Bidirectional Long Short-Term Memory (Bi-LSTM) network. The high-dimensional BoT-IoT dataset, which replicates massive amounts of genuine IoT attack traffic, is used to test the proposed model. Despite an 85 percent reduction in dataset size achieved by attribute selection approaches, the attack detection capability was kept intact. The models built utilizing the smaller dataset demonstrated a higher recall rate (98.25%), F1-measure (99.12%), accuracy (99.56%), and precision (99.45%) with no loss in class discrimination performance compared to models trained on the entire attribute set. With the smaller attribute space, neither the RNN nor the Bi-LSTM models experienced underfitting or overfitting. The proposed DL-based IoT intrusion detection solution has the capability to scale efficiently in the face of large volumes of IoT data, thus making it an ideal candidate for edge-cloud deployment.}, } @article {pmid38139704, year = {2023}, author = {Peixoto, J and Sousa, J and Carvalho, R and Santos, G and Cardoso, R and Reis, A}, title = {End-to-End Solution for Analog Gauge Monitoring Using Computer Vision in an IoT Platform.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {24}, pages = {}, pmid = {38139704}, issn = {1424-8220}, support = {POCI-01-0247-FEDER-047091-GRS: Glartek Retrofit Sensors//Fundo Europeu de Desenvolvimento Regional (FEDER)/ ; }, abstract = {The emergence of Industry 4.0 and 5.0 technologies has enabled the digital transformation of various processes and the integration of sensors with the internet. Despite these strides, many industrial sectors still rely on visual inspection of physical processes, especially those employing analog gauges. This method of monitoring introduces the risk of human errors and inefficiencies. Automating these processes has the potential not only to boost productivity for companies but also to reduce risks for workers. Therefore, to tackle these problems, this paper proposes an end-to-end solution that digitizes analog gauges and monitors them using computer vision by integrating them into an IoT architecture. Our prototype device has been designed to capture images of gauges and transmit them to a remote server, where computer vision algorithms analyze the images and obtain gauge readings. These algorithms achieved adequate robustness and accuracy for industrial environments, with an average relative error of 0.95%. In addition, the gauge data were seamlessly integrated into an IoT platform leveraging computer vision and cloud computing technologies.
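Referring back to the edge-cloud IDS entry above, its RNN-plus-Bi-LSTM detector can be sketched in Keras; the window size, attribute count, and class count below are illustrative assumptions rather than the paper's configuration.

```python
import tensorflow as tf

# Sketch of an RNN + Bi-LSTM traffic classifier in the spirit of the
# IoT-IDS abstract above. Window length, feature count and class count
# are illustrative placeholders (e.g., 15 selected BoT-IoT attributes).
WINDOW, FEATURES, CLASSES = 20, 15, 5

model = tf.keras.Sequential([
    tf.keras.layers.Input(shape=(WINDOW, FEATURES)),
    tf.keras.layers.SimpleRNN(64, return_sequences=True),
    tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(64)),
    tf.keras.layers.Dense(CLASSES, activation="softmax"),
])
model.compile(optimizer="adam",
              loss="sparse_categorical_crossentropy",
              metrics=["accuracy"])
# model.fit(X_train, y_train, ...) on windows of selected traffic attributes
```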
This integration empowers users to create custom dashboards for real-time gauge monitoring, while also enabling them to set thresholds, alarms, and warnings as needed. The proposed solution was tested and validated in a real-world industrial scenario, demonstrating its potential to be implemented in a large-scale setting to serve workers, reduce costs, and increase productivity.}, } @article {pmid38139612, year = {2023}, author = {Ju, S and Park, Y}, title = {Provably Secure Lightweight Mutual Authentication and Key Agreement Scheme for Cloud-Based IoT Environments.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {24}, pages = {}, pmid = {38139612}, issn = {1424-8220}, support = {2022//Keimyung University/ ; }, abstract = {A paradigm that combines cloud computing and the Internet of Things (IoT) allows for more impressive services to be provided to users while addressing storage and computational resource issues in IoT environments. This cloud-based IoT environment has been used in various industries, including public services, for quite some time, and has been researched in academia. However, various security issues can arise during the communication between IoT devices and cloud servers, because communication between devices occurs in open channels. Moreover, issues such as theft of a user's IoT device or extraction of key parameters from the user's device in a remote location can arise. Researchers interested in these issues have proposed lightweight mutual authentication key agreement protocols that are safe and suitable for IoT environments. Recently, a lightweight authentication scheme between IoT devices and cloud servers has been presented. However, we found that their scheme had various security vulnerabilities (it was vulnerable to insider, impersonation, verification table leakage, and privileged insider attacks) and did not provide users with untraceability. To address these flaws, we propose a provably secure lightweight authentication scheme. The proposed scheme uses the user's biometric information and the cloud server's secret key to prevent the exposure of key parameters. Additionally, it ensures low computational costs for providing users with real-time and fast services, using only exclusive-OR operations and hash functions in IoT environments. To analyze the safety of the proposed scheme, we use informal security analysis, Burrows-Abadi-Needham (BAN) logic, and a Real-or-Random (RoR) model. The analysis results confirm that our scheme is secure against insider attacks, impersonation attacks, stolen verifier attacks, and so on; furthermore, it provides additional security elements. It has also been verified to reduce communication costs, shortening the total bit size to 3776 bits, an improvement of almost 6% over Wu et al.'s scheme.
Therefore, we demonstrate that the proposed scheme is suitable for cloud-based IoT environments.}, } @article {pmid38139476, year = {2023}, author = {Zhang, T and Fan, Y}, title = {A 3D U-Net Based on a Vision Transformer for Radar Semantic Segmentation.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {24}, pages = {}, pmid = {38139476}, issn = {1424-8220}, support = {61976033//National Natural Science Foundation of China/ ; 2022JH24/10200029//Pilot Base Construction and Pilot Verification Plan Program of Liaoning Province of China/ ; 2019JH8/10100100//Key Development Guidance Program of Liaoning Province of China/ ; 2022M710569//China Postdoctoral Science Foundation/ ; }, abstract = {Radar data can be presented in various forms, unlike visible data. In the field of radar target recognition, most current work involves point cloud data due to computing limitations, but this form of data lacks useful information. This paper proposes a semantic segmentation network to process high-dimensional data and enable automatic radar target recognition. Rather than relying on point cloud data, which is common in current radar automatic target recognition algorithms, the paper suggests using a radar heat map of high-dimensional data to increase the efficiency of radar data use. The radar heat map provides more complete information than point cloud data, leading to more accurate classification results. Additionally, this paper proposes a dimension collapse module based on a vision transformer for feature extraction between two modules with dimension differences during dimension changes in high-dimensional data. This module is easily extendable to other networks with high-dimensional data collapse requirements. The network's performance is verified using a real radar dataset, showing that the radar semantic segmentation network based on a vision transformer has better performance and fewer parameters compared to segmentation networks that use other dimensional collapse methods.}, } @article {pmid38136978, year = {2023}, author = {Song, Y and Zhong, S and Li, Y and Jiang, M and Wei, Q}, title = {Constructing an Interactive and Integrated Analysis and Identification Platform for Pathogenic Microorganisms to Support Surveillance Capacity.}, journal = {Genes}, volume = {14}, number = {12}, pages = {}, pmid = {38136978}, issn = {2073-4425}, support = {2022YFC2602200//Supported by National Key Research and Development Program of China/ ; }, mesh = {*Software ; *User-Computer Interface ; Genomics/methods ; Computational Biology/methods ; Genome ; }, abstract = {INTRODUCTION: Whole genome sequencing (WGS) holds significant promise for epidemiological inquiries, as it enables the identification and tracking of pathogenic origins and dissemination through comprehensive genome analysis. This method is widely preferred for investigating outbreaks and monitoring pathogen activity. However, the effective utilization of microbiome sequencing data remains a challenge for clinical and public health experts. Through the National Pathogen Resource Center, we have constructed a dynamic and interactive online analysis platform to facilitate the in-depth analysis and use of pathogen genomic data, by public health and associated professionals, to support infectious disease surveillance framework building and capacity warnings.

METHOD: The platform was implemented using the Java programming language, and the front-end pages were developed using the VUE framework, following the MVC (Model-View-Controller) pattern to enable interactive service functionalities for front-end data collection and back-end data computation. Cloud computing services were employed to integrate bioinformatics analysis tools for conducting fundamental analyses of sequencing data.
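To make the controller pattern described above concrete (front end collects parameters, back end dispatches the computation), here is a minimal illustrative sketch in Python/Flask. It is an assumption-laden miniature, not the platform's actual code: the platform itself is Java with a VUE front end, and the /api/analyze endpoint and run_pipeline helper below are hypothetical names.

# Illustrative MVC-style controller sketch (hypothetical endpoint and helper;
# the platform described above is implemented in Java + VUE, not Python/Flask).
from flask import Flask, request, jsonify

app = Flask(__name__)

def run_pipeline(tool, params):
    # Hypothetical stand-in for dispatching a cloud-hosted analysis tool.
    return {"tool": tool, "params": params, "status": "submitted"}

@app.route("/api/analyze", methods=["POST"])
def analyze():
    # Controller: receives the parameters the user set in the web page.
    payload = request.get_json()
    result = run_pipeline(payload["tool"], payload.get("params", {}))
    # View: JSON that the front end renders as interactive output.
    return jsonify(result)

if __name__ == "__main__":
    app.run()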

RESULT: The platform achieved the goal of programming-free analysis, providing an interactive visual interface that allows users to obtain results visually by setting parameters in web pages. Moreover, the platform allows users to export results in various formats to further support their research.

DISCUSSION: We have established a dynamic and interactive online platform for bioinformatics analysis. By encapsulating the complex back-end experiments and analysis processes in a cloud-based service platform, they are presented to the end user in a simple and interactive manner. The platform facilitates real-time data mining and analysis by allowing users to independently select parameters based on their needs and generate analysis results at the click of a button, without requiring a programming background.}, } @article {pmid38136521, year = {2023}, author = {Xia, C and Jin, X and Xu, C and Zeng, P}, title = {Computational-Intelligence-Based Scheduling with Edge Computing in Cyber-Physical Production Systems.}, journal = {Entropy (Basel, Switzerland)}, volume = {25}, number = {12}, pages = {}, pmid = {38136521}, issn = {1099-4300}, support = {61903356//National Natural Science Foundation of China/ ; }, abstract = {Real-time performance and reliability are two critical indicators in cyber-physical production systems (CPPS). To meet strict requirements in terms of these indicators, it is necessary to solve complex job-shop scheduling problems (JSPs) and reserve considerable redundant resources for unexpected jobs before production. However, traditional job-shop methods are difficult to apply under dynamic conditions due to the uncertain time cost of transmission and computation. Edge computing offers an efficient solution to this issue. By deploying edge servers around the equipment, smart factories can achieve localized decisions based on computational intelligence (CI) methods offloaded from the cloud. Most works on edge computing have studied task offloading and dispatching scheduling based on CI. However, few of the existing methods can be used for behavior-level control due to the corresponding requirements for ultralow latency (10 ms) and ultrahigh reliability (99.9999% in wireless transmission), especially when unexpected computing jobs arise. Therefore, this paper proposes a dynamic resource prediction scheduling (DRPS) method based on CI to achieve real-time localized behavior-level control. The proposed DRPS method primarily focuses on the schedulability of unexpected computing jobs, and its core ideas are (1) to predict job arrival times based on a backpropagation neural network and (2) to perform real-time migration in the form of human-computer interaction based on the results of resource analysis. An experimental comparison with existing schemes shows that our DRPS method improves the acceptance ratio by 25.9% compared to the earliest deadline first scheme.}, } @article {pmid38136475, year = {2023}, author = {Kang, H and Liu, G and Wang, Q and Meng, L and Liu, J}, title = {Theory and Application of Zero Trust Security: A Brief Survey.}, journal = {Entropy (Basel, Switzerland)}, volume = {25}, number = {12}, pages = {}, pmid = {38136475}, issn = {1099-4300}, abstract = {As cross-border access becomes more frequent, traditional perimeter-based network security models can no longer cope with evolving security requirements. Zero trust is a novel paradigm for cybersecurity based on the core concept of "never trust, always verify". It attempts to protect against security risks related to internal threats by eliminating the demarcations between the internal and external network of traditional network perimeters.
Nevertheless, research on the theory and application of zero trust is still in its infancy, and more extensive research is necessary to facilitate a deeper understanding of the paradigm in academia and the industry. In this paper, trust in cybersecurity is discussed, following which the origin, concepts, and principles related to zero trust are elaborated on. The characteristics, strengths, and weaknesses of the existing research are analysed in the context of zero trust achievements and their technical applications in Cloud and IoT environments. Finally, to support the development and application of zero trust in the future, the concept and its current challenges are analysed.}, } @article {pmid38134209, year = {2024}, author = {Wang, J and Hu, Y and Xiang, L and Morota, G and Brooks, SA and Wickens, CL and Miller-Cushon, EK and Yu, H}, title = {Technical note: ShinyAnimalCV: open-source cloud-based web application for object detection, segmentation, and three-dimensional visualization of animals using computer vision.}, journal = {Journal of animal science}, volume = {102}, number = {}, pages = {}, pmid = {38134209}, issn = {1525-3163}, mesh = {Animals ; *Cloud Computing ; *Imaging, Three-Dimensional/veterinary ; Software ; Computers ; Animal Husbandry ; Livestock ; }, abstract = {Computer vision (CV), a non-intrusive and cost-effective technology, has furthered the development of precision livestock farming by enabling optimized decision-making through timely and individualized animal care. The availability of affordable two- and three-dimensional camera sensors, combined with various machine learning and deep learning algorithms, has provided a valuable opportunity to improve livestock production systems. However, despite the availability of various CV tools in the public domain, applying these tools to animal data can be challenging, often requiring users to have programming and data analysis skills, as well as access to computing resources. Moreover, the rapid expansion of precision livestock farming is creating a growing need to educate and train animal science students in CV. This presents educators with the challenge of efficiently demonstrating the complex algorithms involved in CV. Thus, the objective of this study was to develop ShinyAnimalCV, an open-source cloud-based web application designed to facilitate CV teaching in animal science. This application provides a user-friendly interface for performing CV tasks, including object segmentation, detection, three-dimensional surface visualization, and extraction of two- and three-dimensional morphological features. Nine pre-trained CV models using top-view animal data are included in the application. ShinyAnimalCV has been deployed online using cloud computing platforms. The source code of ShinyAnimalCV is available on GitHub, along with detailed documentation on training CV models using custom data and deploying ShinyAnimalCV locally to allow users to fully leverage the capabilities of the application. 
ShinyAnimalCV can help to support the teaching of CV, thereby laying the groundwork to promote the adoption of CV in the animal science community.}, } @article {pmid38133241, year = {2023}, author = {Afonso, CL and Afonso, AM}, title = {Next-Generation Sequencing for the Detection of Microbial Agents in Avian Clinical Samples.}, journal = {Veterinary sciences}, volume = {10}, number = {12}, pages = {}, pmid = {38133241}, issn = {2306-7381}, abstract = {Direct-targeted next-generation sequencing (tNGS), with its undoubtedly superior diagnostic capacity over real-time PCR (RT-PCR), and direct-non-targeted NGS (ntNGS), with its higher capacity to identify and characterize multiple agents, are both likely to become diagnostic methods of choice in the future. tNGS is a rapid and sensitive method for precise characterization of suspected agents. ntNGS, also known as agnostic diagnosis, does not require a hypothesis and has been used to identify unsuspected infections in clinical samples. Implemented in the form of multiplexed total DNA metagenomics or as total RNA sequencing, the approach produces comprehensive and actionable reports that allow semi-quantitative identification of most of the agents present in respiratory, cloacal, and tissue samples. The diagnostic benefits of the use of direct tNGS and ntNGS are high specificity, compatibility with different types of clinical samples (fresh, frozen, FTA cards, and paraffin-embedded), production of nearly complete infection profiles (viruses, bacteria, fungi, and parasites), production of "semi-quantitative" information, direct agent genotyping, and infectious agent mutational information. The achievements of NGS in terms of diagnosing poultry problems are described here, along with future applications. Multiplexing, development of standard operating procedures, robotics, sequencing kits, automated bioinformatics, cloud computing, and artificial intelligence (AI) are disciplines converging toward the use of this technology for active surveillance in poultry farms. Other advances in human and veterinary NGS sequencing are likely to be adaptable to avian species in the future.}, } @article {pmid38126383, year = {2023}, author = {Fonseca, ELD and Santos, ECD and Figueiredo, AR and Simões, JC}, title = {The use of sentinel-2 imagery to generate vegetations maps for the Northern Antarctic peninsula and offshore islands.}, journal = {Anais da Academia Brasileira de Ciencias}, volume = {95}, number = {suppl 3}, pages = {e20230710}, doi = {10.1590/0001-3765202320230710}, pmid = {38126383}, issn = {1678-2690}, mesh = {Antarctic Regions ; *Plants ; *Bryophyta ; }, abstract = {We used Sentinel-2 imagery time series to generate a vegetation map for the Northern part of the Antarctic Peninsula and offshore islands, including the South Shetlands. The vegetation cover was identified in the NDVI maximum value composite image. The NDVI values were associated with the occurrence of algae (0.15 - 0.20), lichens (0.20 - 0.50), and mosses (0.50 - 0.80). The vegetation cover distribution map was validated using the literature information. Generating a vegetation distribution map on an annual basis was not possible due to high cloud cover in the Antarctic region, especially in coastal areas, so optical images from 2016 to 2021 were necessary to map the vegetation distribution in the entire study area.
The final map, analyzed in association with the weather data, shows a microenvironment over the western islands of the Antarctic Peninsula that provides conditions for vegetation growth. Sentinel-2 images with 10 m spatial resolution allow the assembly of accurate vegetation distribution maps for the Antarctic Peninsula and its islands, with Google Earth Engine cloud computing being essential for handling the large number of satellite images required to produce these maps.}, } @article {pmid38124874, year = {2023}, author = {Intelligence And Neuroscience, C}, title = {Retracted: Blockchain-Based Trust Management Framework for Cloud Computing-Based Internet of Medical Things (IoMT): A Systematic Review.}, journal = {Computational intelligence and neuroscience}, volume = {2023}, number = {}, pages = {9867976}, pmid = {38124874}, issn = {1687-5273}, abstract = {[This retracts the article DOI: 10.1155/2022/9766844.].}, } @article {pmid38124577, year = {2023}, author = {Niu, S and Liu, W and Yan, S and Liu, Q}, title = {Message sharing scheme based on edge computing in IoV.}, journal = {Mathematical biosciences and engineering : MBE}, volume = {20}, number = {12}, pages = {20809-20827}, doi = {10.3934/mbe.2023921}, pmid = {38124577}, issn = {1551-0018}, abstract = {With the rapid development of 5G wireless communication and sensing technology, the Internet of Vehicles (IoV) will establish a widespread network between vehicles and roadside infrastructure. The collected road information is transferred to the cloud server with the assistance of roadside infrastructure, where it is stored and made available to other vehicles as a resource. However, in an open cloud environment, message confidentiality and vehicle identity privacy are severely compromised, and current attribute-based encryption algorithms still burden vehicles with large computational costs. In order to resolve these issues, we propose a message-sharing scheme in IoV based on edge computing. First, we utilize attribute-based encryption techniques to protect the communications being delivered. We introduce edge computing, in which the vehicle outsources some encryption and decryption operations to roadside units to reduce the vehicle's computational load. Second, to guarantee the integrity of the message and the security of the vehicle identity, we utilize anonymous identity-based signature technology. At the same time, messages can be batch-verified, which further reduces the time and transmission overhead of verifying a large number of message signatures. Based on the computational Diffie-Hellman problem, it is demonstrated that the proposed scheme is secure under the random oracle model.
Finally, the performance analysis results show that our scheme is more computationally efficient than existing schemes and better suited to real-world vehicular networks.}, } @article {pmid38114166, year = {2023}, author = {Ma, XR and Wang, BX and Zhao, WS and Cong, DG and Sun, W and Xiong, HS and Zhang, SN}, title = {[Application progress on data-driven technologies in intelligent manufacturing of traditional Chinese medicine extraction].}, journal = {Zhongguo Zhong yao za zhi = Zhongguo zhongyao zazhi = China journal of Chinese materia medica}, volume = {48}, number = {21}, pages = {5701-5706}, doi = {10.19540/j.cnki.cjcmm.20230824.601}, pmid = {38114166}, issn = {1001-5302}, mesh = {*Medicine, Chinese Traditional ; *Drugs, Chinese Herbal ; Quality Control ; Big Data ; Algorithms ; }, abstract = {The application of new-generation information technologies such as big data, the internet of things (IoT), and cloud computing in the traditional Chinese medicine (TCM) manufacturing industry is gradually deepening, driving the intelligent transformation and upgrading of the TCM industry. At the current stage, there are challenges in understanding the extraction process and its mechanisms in TCM. Online detection technology faces difficulties in making breakthroughs, and data throughout the entire production process is scattered, lacking valuable mining and utilization, which significantly hinders the intelligent upgrading of the TCM industry. Applying data-driven technologies in the process of TCM extraction can enhance the understanding of the extraction process, achieve precise control, and effectively improve the quality of TCM products. This article analyzed the technological bottlenecks in the production process of TCM extraction, summarized commonly used data-driven algorithms in the research and production control of extraction processes, and reviewed the progress in the application of data-driven technologies in the following five aspects: mechanism analysis of the extraction process, process development and optimization, online detection, process control, and production management. This article is expected to provide references for optimizing the extraction process and intelligent production of TCM.}, } @article {pmid38113434, year = {2024}, author = {Brown, C and Agarwal, A and Luque, A}, title = {pyCapsid: identifying dominant dynamics and quasi-rigid mechanical units in protein shells.}, journal = {Bioinformatics (Oxford, England)}, volume = {40}, number = {1}, pages = {}, pmid = {38113434}, issn = {1367-4811}, support = {1951678//National Science Foundation/ ; GBMF9871//Gordon and Betty Moore Foundation/ ; }, mesh = {*Software ; *Proteins ; Amino Acids ; Documentation ; }, abstract = {SUMMARY: pyCapsid is a Python package developed to facilitate the characterization of the dynamics and quasi-rigid mechanical units of protein shells and other protein complexes. The package was developed in response to the rapid increase of high-resolution structures, particularly capsids of viruses, requiring multiscale biophysical analyses. Given a protein shell, pyCapsid generates the collective vibrations of its amino-acid residues, identifies quasi-rigid mechanical regions associated with the disassembly of the structure, and maps the results back to the input proteins for interpretation. pyCapsid summarizes the main results in a report that includes publication-quality figures.
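For orientation, a minimal usage sketch follows. pyCapsid is configuration-driven per its documentation, but the run_capsid entry point and the config file name below are assumptions for illustration; consult the package's online documentation and tutorials for the actual API.

# Sketch of a pyCapsid run; the entry point and config name are assumptions.
# Install first with:  pip install pyCapsid
from pyCapsid import run_capsid  # hypothetical import path

# The config would select the input structure (e.g., a PDB ID), the elastic
# network model, and report options; per the abstract, outputs include the
# quasi-rigid cluster assignments and a report with publication-quality figures.
run_capsid("config.toml")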

AVAILABILITY AND IMPLEMENTATION: pyCapsid's source code is available under the MIT License on GitHub. It is compatible with Python 3.8-3.10 and has been deployed in two leading Python package-management systems, PIP and Conda. Installation instructions and tutorials are available in the online documentation and in pyCapsid's YouTube playlist. In addition, a cloud-based implementation of pyCapsid is available as a Google Colab notebook. pyCapsid Colab does not require installation and generates the same report and outputs as the installable version. Users can post issues regarding pyCapsid in the repository's issues section.}, } @article {pmid38113067, year = {2023}, author = {Faisal, S and Samoth, D and Aslam, Y and Patel, H and Park, S and Baby, B and Patel, T}, title = {Key Features of Smart Medication Adherence Products: Updated Scoping Review.}, journal = {JMIR aging}, volume = {6}, number = {}, pages = {e50990}, pmid = {38113067}, issn = {2561-7605}, abstract = {BACKGROUND: Older adults often face challenges in self-managing their medication owing to physical and cognitive limitations, complex medication regimens, and packaging of medications. Emerging smart medication dispensing and adherence products (SMAPs) offer the options of automated dispensing, tracking medication intake in real time, and reminders and notifications. A 2021 review identified 51 SMAPs; owing to the rapid influx of digital technology, an update to this review is required.

OBJECTIVE: This review aims to identify new products and summarize and compare the key features of SMAPs.

METHODS: Gray and published literature and videos were searched using Google, YouTube, PubMed, Embase, and Scopus. The first 10 pages of Google and the first 100 results of YouTube were screened using 4 and 5 keyword searches, respectively. SMAPs were included if they could store medications and allow for their dispensation, tracked real-time medication intake data, and could automatically analyze data. Products were excluded if they were stand-alone software applications, not marketed in English, not for in-home use, or only used in clinical trials. In total, 5 researchers independently screened and extracted the data.

RESULTS: This review identified 114 SMAPs, including 80 (70.2%) marketed and 34 (29.8%) prototypes, grouped into 15 types. Among the marketed products, 68% (54/80) were available for consumer purchase. Of these products, 26% (14/54) were available worldwide and 78% (42/54) were available in North America. There was variability in the hardware, software, data collection and management features, and cost of the products. Examples of hardware features include battery life, medication storage capacity, the types and number of alarms available, locking features, and additional technology required for use of the product, whereas software features include reminder and notification capabilities and the availability of manufacturer support. Data capture methods include sensors that record the use of the product and capabilities for syncing data with cloud storage via short-range communications. Data were accessible to users via mobile apps or web-based portals. Some SMAPs provided data security assurance with secure log-ins (use of personal identification numbers or facial recognition), whereas other SMAPs provided data through registered email addresses. Although some SMAPs were available at set prices or free of cost to end users, the cost of other products varied based on availability, shipping fees, and subscription fees.

CONCLUSIONS: An expanding market for SMAPs with features specific to at-home patient use is emerging. Health care professionals can use these features to select and suggest products that meet their patients' unique requirements.}, } @article {pmid38107765, year = {2023}, author = {Alam, AKMM and Chen, K}, title = {TEE-Graph: efficient privacy and ownership protection for cloud-based graph spectral analysis.}, journal = {Frontiers in big data}, volume = {6}, number = {}, pages = {1296469}, pmid = {38107765}, issn = {2624-909X}, abstract = {INTRODUCTION: Big graphs like social network user interactions and customer rating matrices require significant computing resources to maintain. Data owners are now using public cloud resources for storage and computing elasticity. However, existing solutions do not fully address the privacy and ownership protection needs of the key involved parties: data contributors and the data owner who collects data from contributors.

METHODS: We propose a Trusted Execution Environment (TEE) based solution, TEE-Graph, for graph spectral analysis of outsourced graphs in the cloud. TEEs are new CPU features that can enable much more efficient confidential computing solutions than traditional software-based cryptographic ones. Our approach has several unique contributions compared to existing confidential graph analysis approaches. (1) It utilizes the unique TEE properties to meet contributors' new privacy needs, e.g., the right of revocation for shared data. (2) It implements efficient access-pattern protection with a differentially private data encoding method. (3) It implements TEE-based spectral analysis algorithms, the Lanczos method and the Nystrom method, for efficiently handling big graphs and protecting confidentiality from compromised cloud providers.
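As background on why the Lanczos method suits big graphs: it approximates extremal eigenpairs using only sparse matrix-vector products, so no dense factorization is ever formed and memory stays close to linear in the number of edges, which matters inside resource-constrained enclaves. A plain (non-TEE) Python sketch using SciPy's Lanczos-based symmetric eigensolver on a small random graph follows; it is illustrative only and does not attempt the paper's enclave or access-pattern protections.

# Lanczos-style spectral analysis sketch on a toy graph (illustrative only).
import numpy as np
import scipy.sparse as sp
from scipy.sparse.linalg import eigsh  # Lanczos-based solver for symmetric matrices

# Small random symmetric adjacency matrix as a stand-in for a big graph.
rng = np.random.default_rng(0)
A = sp.random(500, 500, density=0.01, random_state=rng)
A = ((A + A.T) > 0).astype(float)

# Six leading eigenpairs computed from matrix-vector products alone.
vals, vecs = eigsh(A, k=6, which="LA")
print(vals)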

RESULTS: The TEE-Graph approach is much more efficient than software crypto approaches and also immune to access-pattern-based attacks. Compared with the best-known software crypto approach for graph spectral analysis, PrivateGraph, we have seen that TEE-Graph has 10^3-10^5 times lower computation, storage, and communication costs. Furthermore, the proposed access-pattern protection method incurs only about 10%-25% of the overall computation cost.

DISCUSSION: Our experimentation showed that TEE-Graph performs significantly better and has lower costs than typical software approaches. It also addresses the unique ownership and access-pattern issues that other TEE-related graph analytics approaches have not sufficiently studied. The proposed approach can be extended to other graph analytics problems with strong ownership and access-pattern protection.}, } @article {pmid38093855, year = {2024}, author = {Ortega Candel, JM and Mora Gimeno, FJ and Mora Mora, H}, title = {Generation of a dataset for DoW attack detection in serverless architectures.}, journal = {Data in brief}, volume = {52}, number = {}, pages = {109921}, pmid = {38093855}, issn = {2352-3409}, abstract = {Denial of Wallet (DoW) attacks refer to a type of cyberattack that aims to exploit and exhaust the financial resources of an organization by triggering excessive costs or charges within their cloud or serverless computing environment. These attacks are particularly relevant in the context of serverless architectures due to characteristics like the pay-as-you-go model, auto-scaling, limited control, and cost amplification. Serverless computing, often referred to as Function-as-a-Service (FaaS), is a cloud computing model that allows developers to build and run applications without the need to manage traditional server infrastructure. Serverless architectures have gained popularity in cloud computing due to their flexibility and ability to scale automatically based on demand. These architectures are based on executing functions without the need to manage the underlying infrastructure. However, the lack of realistic and representative datasets that simulate function invocations in serverless environments has been a challenge for research and development of solutions in this field. The aim is to create a dataset for simulating function invocations in serverless architectures, which is a valuable practice for ensuring the reliability, efficiency, and security of serverless applications. Furthermore, we propose a methodology for generating the dataset, which involves synthesizing data from traffic generated on cloud platforms and identifying the main characteristics of function invocations. These characteristics include SubmitTime, Invocation Delay, Response Delay, Function Duration, Active Functions at Request, and Active Functions at Response. By generating this dataset, we expect to facilitate the detection of Denial of Wallet (DoW) attacks using machine learning techniques and neural networks. In this way, this dataset, available in the Mendeley data repository, could provide other researchers and developers with a resource for testing and evaluating machine learning algorithms or other techniques for detecting attacks and anomalies in serverless environments.}, } @article {pmid38090001, year = {2023}, author = {Quan, G and Yao, Z and Chen, L and Fang, Y and Zhu, W and Si, X and Li, M}, title = {A trusted medical data sharing framework for edge computing leveraging blockchain and outsourced computation.}, journal = {Heliyon}, volume = {9}, number = {12}, pages = {e22542}, pmid = {38090001}, issn = {2405-8440}, abstract = {Traditional cloud-centric approaches to medical data sharing pose risks related to real-time performance, security, and stability. Medical and healthcare data encounter challenges like data silos, privacy breaches, and transmission latency.
In response to these challenges, this paper introduces a blockchain-based framework for trustworthy medical data sharing in edge computing environments. Leveraging healthcare consortium edge blockchains, this framework enables fine-grained access control to medical data. Specifically, it addresses the real-time, multi-attribute authorization challenge in CP-ABE through a Distributed Attribute Authorization strategy (DAA) based on blockchain. Furthermore, it tackles the key security issues in CP-ABE through a Distributed Key Generation protocol (DKG) based on blockchain. To address computational resource constraints in CP-ABE, we enhance a Distributed Modular Exponentiation Outsourcing algorithm (DME) and elevate its verifiable probability to "1". Theoretical analysis establishes the IND-CPA security of this framework in the Random Oracle Model. Experimental results demonstrate the effectiveness of our solution for resource-constrained end-user devices in edge computing environments.}, } @article {pmid38082849, year = {2023}, author = {Calo, J and Lo, B}, title = {IoT Federated Blockchain Learning at the Edge.}, journal = {Annual International Conference of the IEEE Engineering in Medicine and Biology Society. IEEE Engineering in Medicine and Biology Society. Annual International Conference}, volume = {2023}, number = {}, pages = {1-4}, doi = {10.1109/EMBC40787.2023.10339946}, pmid = {38082849}, issn = {2694-0604}, mesh = {Humans ; *Blockchain ; Hospitals ; Intelligence ; Machine Learning ; *Medicine ; }, abstract = {IoT devices are sorely underutilized in the medical field, especially within machine learning for medicine, yet they offer unrivaled benefits. IoT devices are low-cost, energy-efficient, small, and intelligent devices [1]. In this paper, we propose a distributed federated learning framework for IoT devices, more specifically for IoMT (Internet of Medical Things), using blockchain to allow for a decentralized scheme that improves privacy and efficiency over a centralized system; this allows us to move from the prevalent cloud-based architectures to the edge. The system is designed for three paradigms: 1) Training neural networks on IoT devices to allow for collaborative training of a shared model whilst decoupling the learning from the dataset [2] to ensure privacy [3]. Training is performed in an online manner simultaneously amongst all participants, allowing for training on actual data that may not have been present in a dataset collected in the traditional way and for dynamically adapting the system whilst it is being trained. 2) Training an IoMT system in a fully private manner so as to mitigate the confidentiality issues of medical data and to build robust, and potentially bespoke [4], models where not much, if any, data exists. 3) Distribution of the actual network training, something federated learning itself does not do, to allow hospitals, for example, to utilize their spare computing resources to train network models.}, } @article {pmid38077560, year = {2023}, author = {Wang, Z}, title = {An English course practice evaluation system based on multi-source mobile information and IoT technology.}, journal = {PeerJ. Computer science}, volume = {9}, number = {}, pages = {e1615}, pmid = {38077560}, issn = {2376-5992}, abstract = {With the increased use of online English courses, the quality of the course directly determines its efficacy.
Recently, various industries have continuously employed Internet of Things (IoT) technology, which offers considerable adaptability across scenarios. To better supervise the specific content of English courses, we discuss how to apply multi-source mobile IoT information technology to the practical evaluation system of English courses to boost the performance of English learning evaluation. Therefore, by analyzing the problems of existing English course evaluation and the characteristics of multi-source mobile IoT information technology, this article designs an English course practical evaluation system based on multi-source data collection, processing, and analysis. The system collects students' voice, behavior, and other data in real time through mobile devices, then analyzes the data using cloud computing and data mining technology and provides real-time learning progress and feedback. We demonstrate that the accuracy of the evaluation system reaches 80.23%, which can effectively improve the efficiency of English learning evaluation, provide a new method for English teaching evaluation, and help optimize English teaching content to meet the needs of the actual teaching environment.}, } @article {pmid38077558, year = {2023}, author = {Gu, H and Wang, J and Yu, J and Wang, D and Li, B and He, X and Yin, X}, title = {Towards virtual machine scheduling research based on multi-decision AHP method in the cloud computing platform.}, journal = {PeerJ. Computer science}, volume = {9}, number = {}, pages = {e1675}, pmid = {38077558}, issn = {2376-5992}, abstract = {Virtual machine scheduling and resource allocation during dynamic virtual machine consolidation are a promising approach to alleviating cloud data centers' prominent energy consumption and service level agreement violations while improving quality of service (QoS). In this article, we propose an efficient algorithm (AESVMP) based on the Analytic Hierarchy Process (AHP) for virtual machine scheduling. First, we take into consideration three key criteria: the host's power consumption, its available resources, and its resource allocation balance ratio, where the ratio is calculated from the balance between the overall three-dimensional resource plane (CPU, RAM, BW) and the resource allocation plane after a newly migrated virtual machine (VM) consumes the target host's resources. The virtual machine placement decision is then determined by applying the multi-criteria decision-making technique AHP to the three criteria above. Extensive experimental results based on the CloudSim emulator using 10 PlanetLab workloads demonstrate that the proposed approach can reduce the cloud data center's number of migrations, service level agreement violations (SLAV), and the aggregate indicator of energy consumption (ESV) by an average of 51.76%, 67.4%, and 67.6%, respectively, compared with the cutting-edge method LBVMP, which validates its effectiveness.}, } @article {pmid38077531, year = {2023}, author = {Eljack, S and Jemmali, M and Denden, M and Turki, S and Khedr, WM and Algashami, AM and ALsadig, M}, title = {A secure solution based on load-balancing algorithms between regions in the cloud environment.}, journal = {PeerJ.
Computer science}, volume = {9}, number = {}, pages = {e1513}, pmid = {38077531}, issn = {2376-5992}, abstract = {The problem treated in this article is the storage of sensitive data in the cloud environment and how to choose regions and zones to minimize the number of file transfer events. Handling sensitive data repeatedly across the global internet can increase risk and reduce security. Our work consists of scheduling several files across the different regions based on security and load balancing parameters in the cloud. Each file is characterized by its size. If data is misplaced from the start, it will require a transfer from one region to another and sometimes from one zone to another. The objective is to find a schedule that assigns these files to the appropriate regions, ensuring load balancing within each region and guaranteeing a minimum number of migrations. This problem is NP-hard. A novel model regarding the regional security and load balancing of files in the cloud environment is proposed in this article. This model is based on a component called the "Scheduler", which utilizes the proposed algorithms to solve the problem. This model is a secure solution that guarantees an efficient dispersion of the stored files, avoiding the concentration of most storage in one region; consequently, damage to any one region does not cause a large data loss. In addition, a novel method called the "Grouping method" is proposed. Several variants of the application of this method are utilized to propose novel algorithms for solving the studied problem. Initially, seven algorithms are proposed in this article. The experimental results show that there is no dominance between these algorithms. Therefore, three combinations of these seven algorithms generate three other algorithms with better results. Based on the dominance rule, only six algorithms are selected to discuss the performance of the proposed algorithms. Four classes of instances are generated to measure and test the performance of algorithms. In total, 1,360 instances are tested. Three metrics are used to assess the algorithms and make a comparison between them. The experimental results show that the best algorithm is the "Best-value of four algorithms" in 86.5% of cases with an average gap of 0.021 and an average running time of 0.0018 s.}, } @article {pmid38074363, year = {2023}, author = {Intelligence And Neuroscience, C}, title = {Retracted: The Rise of Cloud Computing: Data Protection, Privacy, and Open Research Challenges-A Systematic Literature Review (SLR).}, journal = {Computational intelligence and neuroscience}, volume = {2023}, number = {}, pages = {9838129}, pmid = {38074363}, issn = {1687-5273}, abstract = {[This retracts the article DOI: 10.1155/2022/8303504.].}, } @article {pmid38074307, year = {2023}, author = {Mangana, CM and Barraquer, A and Ferragut-Alegre, Á and Santolaria, G and Olivera, M and Barraquer, R}, title = {Detection of graft failure in post-keratoplasty patients by automated deep learning.}, journal = {Saudi journal of ophthalmology : official journal of the Saudi Ophthalmological Society}, volume = {37}, number = {3}, pages = {207-210}, pmid = {38074307}, issn = {1319-4534}, abstract = {PURPOSE: Detection of graft failure in post-penetrating keratoplasty (PKP) patients from a proprietary dataset using algorithms trained with Automated Deep Learning (AutoML).

METHODS: This was an observational cross-sectional study in which AutoML algorithms were trained, following a success/failure labeling strategy based on clinical notes, on a cohort of 220 anterior pole images of post-keratoplasty eyes. Once the image quality criteria were verified and the dataset was pseudo-anonymized, it was transferred to the Google Cloud Platform, where, using the Vertex AI AutoML API, cloud- and edge-based models were trained, following expert recommendations on dataset splitting (80% training, 10% test, and 10% validation).
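The 80/10/10 split cited above is straightforward to reproduce outside AutoML when validating on one's own data; a minimal scikit-learn sketch follows, with placeholder file names and labels standing in for the 220-image cohort.

# Reproducing an 80/10/10 train/test/validation split (placeholder data).
from sklearn.model_selection import train_test_split

paths = [f"img_{i:03d}.png" for i in range(220)]   # placeholder image paths
labels = [i % 2 for i in range(220)]               # placeholder success/failure labels

# 80% train, then split the remaining 20% evenly into 10% test / 10% validation.
train_p, rest_p, train_y, rest_y = train_test_split(
    paths, labels, test_size=0.2, stratify=labels, random_state=42)
test_p, val_p, test_y, val_y = train_test_split(
    rest_p, rest_y, test_size=0.5, stratify=rest_y, random_state=42)
print(len(train_p), len(test_p), len(val_p))       # 176 22 22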

RESULTS: The metrics obtained by the cloud-based and edge-based models were similar, but we chose to analyze the edge model because it is exportable, lighter, and cheaper to train. The initial results of the model presented an accuracy of 95.83%, with a specificity of 91.67% and a sensitivity of 100%, obtaining an F1 score of 95.996% and a precision of 92.30%. Other metrics, such as the area under the curve, the confusion matrix, and activation maps, were also considered.
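The reported F1 score is the harmonic mean of the reported precision and sensitivity (recall), and the figures above are internally consistent, as this quick check shows:

# F1 as the harmonic mean of the reported precision (92.30%) and recall (100%).
precision, recall = 0.9230, 1.0
f1 = 2 * precision * recall / (precision + recall)
print(f"{f1:.5f}")  # 0.95996, matching the reported F1 of 95.996%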

CONCLUSION: Initial results indicate the possibility of training algorithms in an automated fashion for the detection of graft failure in patients who underwent PKP. These algorithms are very lightweight tools easily integrated into mobile or desktop applications, potentially allowing every corneal transplant patient to have access to the best knowledge to enable the correct and timely diagnosis and treatment of graft failure. Although the results were good, because of the relatively small dataset, the model may have some tendency to overfit. AutoML opens the field of artificial-intelligence-based computer vision to professionals with little programming experience or knowledge.}, } @article {pmid38072221, year = {2024}, author = {Doo, FX and Kulkarni, P and Siegel, EL and Toland, M and Yi, PH and Carlos, RC and Parekh, VS}, title = {Economic and Environmental Costs of Cloud Technologies for Medical Imaging and Radiology Artificial Intelligence.}, journal = {Journal of the American College of Radiology : JACR}, volume = {21}, number = {2}, pages = {248-256}, doi = {10.1016/j.jacr.2023.11.011}, pmid = {38072221}, issn = {1558-349X}, mesh = {*Artificial Intelligence ; Cloud Computing ; *Radiology ; Costs and Cost Analysis ; Diagnostic Imaging ; }, abstract = {Radiology is on the verge of a technological revolution driven by artificial intelligence (including large language models), which requires robust computing and storage capabilities, often beyond the capacity of current non-cloud-based informatics systems. The cloud presents a potential solution for radiology, and we should weigh its economic and environmental implications. Recently, cloud technologies have become a cost-effective strategy by providing necessary infrastructure while reducing expenditures associated with hardware ownership, maintenance, and upgrades. Simultaneously, given the optimized energy consumption in modern cloud data centers, this transition is expected to reduce the environmental footprint of radiologic operations. The path to cloud integration comes with its own challenges, and radiology informatics leaders must consider elements such as cloud architectural choices, pricing, data security, uptime service agreements, user training and support, and broader interoperability. With the increasing importance of data-driven tools in radiology, understanding and navigating the cloud landscape will be essential for the future of radiology and its various stakeholders.}, } @article {pmid38069903, year = {2024}, author = {Mirchandani, CD and Shultz, AJ and Thomas, GWC and Smith, SJ and Baylis, M and Arnold, B and Corbett-Detig, R and Enbody, E and Sackton, TB}, title = {A Fast, Reproducible, High-throughput Variant Calling Workflow for Population Genomics.}, journal = {Molecular biology and evolution}, volume = {41}, number = {1}, pages = {}, pmid = {38069903}, issn = {1537-1719}, mesh = {Animals ; *Software ; *Metagenomics ; Workflow ; Genomics ; Sequence Analysis, DNA ; High-Throughput Nucleotide Sequencing ; }, abstract = {The increasing availability of genomic resequencing data sets and high-quality reference genomes across the tree of life present exciting opportunities for comparative population genomic studies. However, substantial challenges prevent the simple reuse of data across different studies and species, arising from variability in variant calling pipelines, data quality, and the need for computationally intensive reanalysis.
Here, we present snpArcher, a flexible and highly efficient workflow designed for the analysis of genomic resequencing data in nonmodel organisms. snpArcher provides a standardized variant calling pipeline and includes modules for variant quality control, data visualization, variant filtering, and other downstream analyses. Implemented in Snakemake, snpArcher is user-friendly, reproducible, and designed to be compatible with high-performance computing clusters and cloud environments. To demonstrate the flexibility of this pipeline, we applied snpArcher to 26 public resequencing data sets from nonmammalian vertebrates. These variant data sets are hosted publicly to enable future comparative population genomic analyses. With its extensibility and the availability of public data sets, snpArcher will contribute to a broader understanding of genetic variation across species by facilitating the rapid use and reuse of large genomic data sets.}, } @article {pmid38067890, year = {2023}, author = {Kiarashi, Y and Saghafi, S and Das, B and Hegde, C and Madala, VSK and Nakum, A and Singh, R and Tweedy, R and Doiron, M and Rodriguez, AD and Levey, AI and Clifford, GD and Kwon, H}, title = {Graph Trilateration for Indoor Localization in Sparsely Distributed Edge Computing Devices in Complex Environments Using Bluetooth Technology.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {23}, pages = {}, pmid = {38067890}, issn = {1424-8220}, support = {cox-emory2019//James M. Cox Foundation and Cox Enterprises, Inc/ ; }, mesh = {Humans ; *Cloud Computing ; Wireless Technology ; Health Status ; Movement ; *Spatial Navigation/physiology ; }, abstract = {Spatial navigation patterns in indoor space usage can reveal important cues about the cognitive health of participants. In this work, we present a low-cost, scalable, open-source edge computing system using Bluetooth low energy (BLE) beacons for tracking indoor movements in a large, 1700 m2 facility used to carry out therapeutic activities for participants with mild cognitive impairment (MCI). The facility is instrumented with 39 edge computing systems, along with an on-premise fog server. The participants carry a BLE beacon, whose signals are received and analyzed by the edge computing systems. Edge computing systems are sparsely distributed in the wide, complex indoor space, challenging the standard trilateration technique for localizing subjects, which assumes a dense installation of BLE beacons. We propose a graph trilateration approach that considers the temporal density of hits from the BLE beacon to surrounding edge devices to handle the inconsistent coverage of edge devices. This proposed method helps us tackle the varying signal strength, which leads to intermittent detection of beacons. The proposed method can pinpoint the positions of multiple participants with an average error of 4.4 m and over 85% accuracy in region-level localization across the entire study area.
Our experimental results, evaluated in a clinical environment, suggest that an ordinary medical facility can be transformed into a smart space that enables automatic assessment of individuals' movements, which may reflect health status or response to treatment.}, } @article {pmid38067868, year = {2023}, author = {Garcia-Perez, A and Miñón, R and Torre-Bastida, AI and Zulueta-Guerrero, E}, title = {Analysing Edge Computing Devices for the Deployment of Embedded AI.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {23}, pages = {}, pmid = {38067868}, issn = {1424-8220}, support = {SONETO project, ref. KK-2023/00038//Basque Government Elkartek program/ ; }, abstract = {In recent years, more and more devices have been connected to the network, generating an overwhelming amount of data; this booming phenomenon is known as the Internet of Things. To deal with these data close to the source, Edge Computing has emerged. The main objective is to address the limitations of cloud processing and satisfy the growing demand for applications and services that require low latency, greater efficiency and real-time response capabilities. Furthermore, it is essential to underscore the intrinsic connection between artificial intelligence and edge computing within the context of our study. This integral relationship not only addresses the challenges posed by data proliferation but also propels a transformative wave of innovation, shaping a new era of data processing capabilities at the network's edge. Edge devices can perform real-time data analysis and make autonomous decisions without relying on constant connectivity to the cloud. This article aims at analysing and comparing Edge Computing devices when artificial intelligence algorithms are deployed on them. To this end, a detailed experiment involving various edge devices, models and metrics is conducted. In addition, we observe how artificial intelligence accelerators such as the Tensor Processing Unit (TPU) behave. This analysis seeks to inform the choice of the device that best suits the given AI requirements. In summary, the Jetson Nano provides the best performance when only the CPU is used; nevertheless, the utilisation of a TPU drastically enhances the results.}, } @article {pmid38067859, year = {2023}, author = {Balatsouras, CP and Karras, A and Karras, C and Karydis, I and Sioutas, S}, title = {WiCHORD+: A Scalable, Sustainable, and P2P Chord-Based Ecosystem for Smart Agriculture Applications.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {23}, pages = {}, pmid = {38067859}, issn = {1424-8220}, support = {Save-Water//European Union and national funds of Greece and Albania under the Interreg IPA II Cross-border Cooperation Programme "Greece - Albania 2014-2020"/ ; }, abstract = {In the evolving landscape of Industry 4.0, the convergence of peer-to-peer (P2P) systems, LoRa-enabled wireless sensor networks (WSNs), and distributed hash tables (DHTs) represents a major advancement that enhances sustainability in the modern agriculture framework and its applications. In this study, we propose a P2P Chord-based ecosystem for sustainable and smart agriculture applications, inspired by the inner workings of the Chord protocol. The node-centric approach of WiCHORD+ is a standout feature, streamlining operations in WSNs and leading to more energy-efficient and straightforward system interactions.
Unlike traditional key-centric methods, WiCHORD+ is a node-centric protocol that is compatible with the inherent characteristics of WSNs. This unique design integrates seamlessly with distributed hash tables (DHTs), providing an efficient mechanism to locate nodes and ensure robust data retrieval while reducing energy consumption. Additionally, by utilizing the MAC address of each node in data routing, WiCHORD+ offers a more direct and efficient data lookup mechanism, essential for the timely and energy-efficient operation of WSNs. While the increasing dependence of smart agriculture on cloud computing environments for data storage and machine learning techniques for real-time prediction and analytics continues, frameworks like the proposed WiCHORD+ appear promising for future IoT applications due to their compatibility with modern devices and peripherals. Ultimately, the proposed approach aims to effectively incorporate LoRa, WSNs, DHTs, cloud computing, and machine learning, by providing practical solutions to the ongoing challenges in the current smart agriculture landscape and IoT applications.}, } @article {pmid38067809, year = {2023}, author = {Park, J and Jeong, J}, title = {An Autoscaling System Based on Predicting the Demand for Resources and Responding to Failure in Forecasting.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {23}, pages = {}, pmid = {38067809}, issn = {1424-8220}, support = {2018R1A5A7023490//National Research Foundation of Korea/ ; 2021R1F1A1061514//National Research Foundation of Korea/ ; S-2022-G0001-00070//Dongguk University/ ; }, abstract = {In recent years, the convergence of edge computing and sensor technologies has become a pivotal frontier revolutionizing real-time data processing. In particular, the practice of data acquisition, which encompasses the collection of sensory information in the form of images and videos followed by their transmission to a remote cloud infrastructure for subsequent analysis, has witnessed a notable surge in adoption. However, to ensure seamless real-time processing irrespective of the data volume being conveyed or the frequency of incoming requests, it is vital to proactively locate resources within the cloud infrastructure specifically tailored to data-processing tasks. Many studies have focused on the proactive prediction of resource demands through the use of deep learning algorithms, generating considerable interest in real-time data processing. Nonetheless, an inherent risk arises when relying solely on predictive resource allocation, as it can heighten the susceptibility to system failure. In this study, we propose a framework with algorithms that periodically monitor resource requirements and dynamically adjust resource provisioning to match actual demand.
Under experimental conditions with the Bitbrains dataset, setting the network throughput to 300 kB/s and with a threshold of 80%, the proposed system provides a 99% performance improvement in terms of the autoscaling algorithm and requires only 0.43 ms of additional computational overhead compared to relying on a simple prediction model alone.}, } @article {pmid38067758, year = {2023}, author = {Khan, A and Khattak, KS and Khan, ZH and Gulliver, TA and Abdullah, }, title = {Edge Computing for Effective and Efficient Traffic Characterization.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {23}, pages = {}, pmid = {38067758}, issn = {1424-8220}, support = {National Center for Big Data and Cloud Computing//Higher Education Commission/ ; }, abstract = {Traffic flow analysis is essential to develop smart urban mobility solutions. Although numerous tools have been proposed, they employ only a small number of parameters. To overcome this limitation, an edge computing solution is proposed based on nine traffic parameters, namely, vehicle count, direction, speed, and type, flow, peak hour factor, density, time headway, and distance headway. The proposed low-cost solution is easy to deploy and maintain. The sensor node is comprised of a Raspberry Pi 4, Pi camera, Intel Movidius Neural Compute Stick 2, Xiaomi MI Power Bank, and Zong 4G Bolt+. Pre-trained models from the OpenVINO Toolkit are employed for vehicle detection and classification, and a centroid tracking algorithm is used to estimate vehicle speed. The measured traffic parameters are transmitted to the ThingSpeak cloud platform via 4G. The proposed solution was field-tested for one week (7 h/day), with approximately 10,000 vehicles per day. The count, classification, and speed accuracies obtained were 79.8%, 93.2%, and 82.9%, respectively. The sensor node can operate for approximately 8 h with a 10,000 mAh power bank and the required data bandwidth is 1.5 MB/h. The proposed edge computing solution overcomes the limitations of existing traffic monitoring systems and can work in hostile environments.}, } @article {pmid38067756, year = {2023}, author = {Aljebreen, M and Alohali, MA and Mahgoub, H and Aljameel, SS and Alsumayt, A and Sayed, A}, title = {Multi-Objective Seagull Optimization Algorithm with Deep Learning-Enabled Vulnerability Detection for Secure Cloud Environments.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {23}, pages = {}, pmid = {38067756}, issn = {1424-8220}, support = {PNURSP2023R330//Princess Nourah bint Abdulrahman University/ ; RSP2023R459//King Saud University/ ; }, abstract = {Cloud computing (CC) is an internet-enabled environment that provides computing services such as networking, databases, and servers to clients and organizations in a cost-effective manner. Despite the benefits rendered by CC, its security remains a prominent concern to overcome. An intrusion detection system (IDS) is generally used to detect both normal and anomalous behavior in networks. The design of IDS using a machine learning (ML) technique comprises a series of methods that can learn patterns from data and forecast the outcomes consequently. In this background, the current study designs a novel multi-objective seagull optimization algorithm with a deep learning-enabled vulnerability detection (MOSOA-DLVD) technique to secure the cloud platform. 
The MOSOA-DLVD technique uses the feature selection (FS) method and hyperparameter tuning strategy to identify the presence of vulnerabilities or attacks in the cloud infrastructure. Primarily, the FS method is implemented using the MOSOA technique. Furthermore, the MOSOA-DLVD technique uses a deep belief network (DBN) method for intrusion detection and its classification. In order to improve the detection outcomes of the DBN algorithm, the sooty tern optimization algorithm (STOA) is applied for the hyperparameter tuning process. The performance of the proposed MOSOA-DLVD system was validated with extensive simulations upon a benchmark IDS dataset. The improved intrusion detection results of the MOSOA-DLVD approach with a maximum accuracy of 99.34% establish the proficiency of the model compared with recent methods.}, } @article {pmid38067703, year = {2023}, author = {Cicero, S and Guarascio, M and Guerrieri, A and Mungari, S}, title = {A Deep Anomaly Detection System for IoT-Based Smart Buildings.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {23}, pages = {}, pmid = {38067703}, issn = {1424-8220}, abstract = {In recent years, technological advancements in sensor, communication, and data storage technologies have led to the increasingly widespread use of smart devices in different types of buildings, such as residential homes, offices, and industrial installations. The main benefit of using these devices is the possibility of enhancing different crucial aspects of life within these buildings, including energy efficiency, safety, health, and occupant comfort. In particular, the fast progress in the field of the Internet of Things has yielded exponential growth in the number of connected smart devices and, consequently, increased the volume of data generated and exchanged. However, traditional Cloud-Computing platforms have exhibited limitations in their capacity to handle and process the continuous data exchange, leading to the rise of new computing paradigms, such as Edge Computing and Fog Computing. In this new complex scenario, advanced Artificial Intelligence and Machine Learning can play a key role in analyzing the generated data and predicting unexpected or anomalous events, allowing for quickly setting up effective responses against these unexpected events. To the best of our knowledge, current literature lacks Deep-Learning-based approaches specifically devised for guaranteeing safety in IoT-Based Smart Buildings. For this reason, we adopt an unsupervised neural architecture for detecting anomalies, such as faults, fires, theft attempts, and more, in such contexts. In more detail, in our proposal, data from a sensor network are processed by a Sparse U-Net neural model. The proposed approach is lightweight, making it suitable for deployment on the edge nodes of the network, and it does not require a pre-labeled training dataset. 
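The detector in the smart-building abstract above is a Sparse U-Net; as a rough stand-in for the same unsupervised, reconstruction-error principle (fit a reconstructor on unlabeled, mostly normal sensor windows and flag windows it reconstructs poorly), here is a minimal PCA-based sketch:

```python
import numpy as np

rng = np.random.default_rng(0)

# Unlabeled training windows from a sensor network (assumed mostly normal).
normal = rng.normal(0.0, 1.0, size=(500, 16))

# Fit a low-rank reconstructor (PCA via SVD). The paper uses a Sparse
# U-Net; any reconstructor supports the same error-thresholding scheme.
mean = normal.mean(axis=0)
_, _, vt = np.linalg.svd(normal - mean, full_matrices=False)
components = vt[:4]                      # keep 4 principal directions

def reconstruction_error(x):
    z = (x - mean) @ components.T        # encode
    x_hat = z @ components + mean        # decode
    return np.linalg.norm(x - x_hat, axis=-1)

# Threshold at a high percentile of the training error (no labels needed).
threshold = np.percentile(reconstruction_error(normal), 99)

window = rng.normal(0.0, 1.0, size=16) + 5.0   # e.g., a fault or fire spike
print("anomaly" if reconstruction_error(window) > threshold else "normal")
```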
Experiments conducted on a real-world case study demonstrate the effectiveness of the developed solution.}, } @article {pmid38067697, year = {2023}, author = {Mehmood, KT and Atiq, S and Hussain, MM}, title = {Enhancing QoS of Telecom Networks through Server Load Management in Software-Defined Networking (SDN).}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {23}, pages = {}, pmid = {38067697}, issn = {1424-8220}, abstract = {In the modern era, with the emergence of the Internet of Things (IoT), big data applications, cloud computing, and the ever-increasing demand for high-speed internet with the aid of upgraded telecom network resources, users now require virtualization of the network for smart handling of modern-day challenges to obtain better services (in terms of security, reliability, scalability, etc.). These requirements can be fulfilled by using software-defined networking (SDN). This research article emphasizes one of the major aspects of the practical implementation of SDN to enhance the QoS of a virtual network through the load management of network servers. In an SDN-based network, several servers are available to fulfill users' hypertext transfer protocol (HTTP) requests to ensure dynamic routing under the influence of the SDN controller. However, if a large number of requests is directed to a specific server, the controller is bound to follow the user-programmed instructions, and the load on that server is increased, which results in (a) an increase in end-to-end user delay, (b) a decrease in the data transfer rate, and (c) a decrease in the available bandwidth of the targeted server. All of the above-mentioned factors will result in the degradation of network QoS. With the implementation of the proposed algorithm, dynamic active sensing server load management (DASLM), on the SDN controller, the load on the server is shared based on QoS control parameters (throughput, response time, round trip time, etc.). The overall delay is reduced, and the bandwidth utilization along with throughput is also increased.}, } @article {pmid38062043, year = {2023}, author = {Stanimirova, R and Tarrio, K and Turlej, K and McAvoy, K and Stonebrook, S and Hu, KT and Arévalo, P and Bullock, EL and Zhang, Y and Woodcock, CE and Olofsson, P and Zhu, Z and Barber, CP and Souza, CM and Chen, S and Wang, JA and Mensah, F and Calderón-Loor, M and Hadjikakou, M and Bryan, BA and Graesser, J and Beyene, DL and Mutasha, B and Siame, S and Siampale, A and Friedl, MA}, title = {A global land cover training dataset from 1984 to 2020.}, journal = {Scientific data}, volume = {10}, number = {1}, pages = {879}, pmid = {38062043}, issn = {2052-4463}, support = {80NSSC18K0994//National Aeronautics and Space Administration (NASA)/ ; }, abstract = {State-of-the-art cloud computing platforms such as Google Earth Engine (GEE) enable regional-to-global land cover and land cover change mapping with machine learning algorithms. However, collection of high-quality training data, which is necessary for accurate land cover mapping, remains costly and labor-intensive. To address this need, we created a global database of nearly 2 million training units spanning the period from 1984 to 2020 for seven primary and nine secondary land cover classes. Our training data collection approach leveraged GEE and machine learning algorithms to ensure data quality and biogeographic representation.
We sampled the spectral-temporal feature space from Landsat imagery to efficiently allocate training data across global ecoregions and incorporated publicly available and collaborator-provided datasets into our database. To reflect the underlying regional class distribution and post-disturbance landscapes, we strategically augmented the database. We used a machine learning-based cross-validation procedure to remove potentially mis-labeled training units. Our training database is relevant for a wide array of studies, such as land cover change, agriculture, forestry, hydrology, and urban development, among many others.}, } @article {pmid38061141, year = {2024}, author = {Long, K and Chen, Z and Zhang, H and Zhang, M}, title = {Spatiotemporal disturbances and attribution analysis of mangrove in southern China from 1986 to 2020 based on time-series Landsat imagery.}, journal = {The Science of the total environment}, volume = {912}, number = {}, pages = {169157}, doi = {10.1016/j.scitotenv.2023.169157}, pmid = {38061141}, issn = {1879-1026}, abstract = {As one of the most productive ecosystems in the world, mangrove plays a critical role in both the natural ecosystem and human economic and social systems. However, two thirds of the world's mangrove have been irreversibly damaged over the past 100 years, as a result of ongoing human activities and climate change. In this paper, adopting Landsat imagery from the past 36 years as the data source, the detection of spatiotemporal changes of mangrove in southern China was carried out based on the Google Earth Engine (GEE) cloud platform using the LandTrendr algorithm. In addition, the attribution of mangrove disturbances was analyzed by a random forest algorithm. The results indicated that the area of mangrove recovery (5174.64 hm[2]) was much larger than the area of mangrove disturbances (1625.40 hm[2]) over the 35-year period in the study area. The disturbances of mangrove in southern China were dominated by low and low-to-medium-level disturbances, with an area of 1009.89 hm[2], accounting for 57.50 % of the total disturbances. The mangrove recovery was also dominated by low and low-to-medium-level recovery, with an area of 3239.19 hm[2], accounting for 62.61 % of the total recovery area. Both human and natural factors interacted and influenced each other, together causing spatiotemporal disturbances of mangrove in southern China during 1986-2020. The mangrove disturbances in Phase I (1986-2000) and Phase III (2011-2020) were driven by human-induced factors (50.74 % and 58.86 %), such as the construction of roads and aquaculture ponds. The mangrove disturbances in Phase II (2001-2010) were dominated by natural factors (55.73 %), such as tides, flooding, and species invasions.
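For readers unfamiliar with the LandTrendr-on-GEE workflow this study builds on, a compressed sketch using the GEE Python API follows; the single-sensor annual compositing and parameter values are illustrative only and do not reproduce the paper's configuration.

```python
import ee

ee.Initialize()

def annual_nbr(year):
    # One median Landsat 5 C02/L2 composite per year, reduced to NBR
    # (a real 1986-2020 series would blend Landsat 5/7/8 sensors).
    col = (ee.ImageCollection("LANDSAT/LT05/C02/T1_L2")
           .filterDate(f"{year}-01-01", f"{year}-12-31"))
    nbr = col.median().normalizedDifference(["SR_B4", "SR_B7"]).rename("NBR")
    return nbr.set("system:time_start", ee.Date.fromYMD(year, 6, 1).millis())

series = ee.ImageCollection([annual_nbr(y) for y in range(1986, 2012)])

# Temporal segmentation; parameter values here are generic, illustrative ones.
lt = ee.Algorithms.TemporalSegmentation.LandTrendr(
    timeSeries=series,
    maxSegments=6,
    spikeThreshold=0.9,
    recoveryThreshold=0.25,
    pvalThreshold=0.05,
)
# The 'LandTrendr' array band encodes the fitted vertices from which
# disturbance year, magnitude, and duration maps are derived.
print(lt.bandNames().getInfo())
```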
It was also observed that the area of mangrove recovery in southern China increased dramatically from 1986 to 2020 due to the promulgation and implementation of the Chinese government's policy on mangrove protection, as well as increased human awareness of mangrove wetland protection.}, } @article {pmid38053971, year = {2023}, author = {Bernardi, M and Cardarelli, F}, title = {Phasor identifier: A cloud-based analysis of phasor-FLIM data on Python notebooks.}, journal = {Biophysical reports}, volume = {3}, number = {4}, pages = {100135}, pmid = {38053971}, issn = {2667-0747}, abstract = {This paper introduces an innovative approach utilizing Google Colaboratory for the versatile analysis of phasor fluorescence lifetime imaging microscopy (FLIM) data collected from various samples (e.g., cuvette, cells, tissues) and in various input file formats. In fact, phasor-FLIM's widespread adoption has been hampered by complex instrumentation and data analysis requirements. We mean to make advanced FLIM analysis more accessible to researchers through a cloud-based solution that 1) harnesses robust computational resources, 2) eliminates hardware limitations, and 3) supports both CPU and GPU processing. We envision a paradigm shift in FLIM data accessibility and potential, aligning with the evolving field of artificial intelligence-driven FLIM analysis. This approach simplifies FLIM data handling and opens doors for diverse applications, from studying cellular metabolism to investigating drug encapsulation, benefiting researchers across multiple domains. The comparative analysis of freely distributed FLIM tools highlights the unique advantages of this approach in terms of adaptability, scalability, and open-source nature.}, } @article {pmid38053860, year = {2023}, author = {Moparthi, NR and Balakrishna, G and Chithaluru, P and Kolla, M and Kumar, M}, title = {An improved energy-efficient cloud-optimized load-balancing for IoT frameworks.}, journal = {Heliyon}, volume = {9}, number = {11}, pages = {e21947}, pmid = {38053860}, issn = {2405-8440}, abstract = {As wireless communication grows, so does the need for smart, simple, affordable solutions. The need prompted academics to develop appropriate network solutions ranging from wireless sensor networks (WSNs) to the Internet of Things (IoT). With the innovations of researchers, the necessity for enhancements to existing research has increased. Initially, network protocols were the focus of study and development. Regardless, IoT devices are already being employed in different industries and collecting massive amounts of data through complicated applications. This necessitates IoT load-balancing research. Several studies tried to address the communication overheads produced by significant IoT network traffic. These studies intended to control network loads by evenly spreading them across IoT nodes. Eventually, the practitioners decided to migrate the IoT node data and the apps processing it to the cloud. So, the difficulty is to design a cloud-based load balancer algorithm that meets the criteria of IoT network protocols; the proposed approach is defined as a unique method for controlling loads on cloud-integrated IoT networks. The suggested method analyses actual and virtual host machine needs in cloud computing environments. The purpose of the proposed model is to design a load balancer that improves network response time while reducing energy consumption. The proposed load balancer algorithm may be easily integrated with existing IoT frameworks.
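A toy sketch of the kind of cost-based host selection such a response-time- and energy-aware load balancer implies is shown below; the host metrics and weights are hypothetical, not taken from the paper.

```python
from dataclasses import dataclass

@dataclass
class Host:
    name: str
    expected_response_ms: float   # estimated time to serve one more request
    watts_per_request: float      # marginal energy cost
    load: int = 0                 # requests currently assigned

def pick_host(hosts, w_time=0.7, w_energy=0.3):
    """Assign the next request to the host minimizing a weighted blend
    of response time and energy cost (weights are illustrative)."""
    def cost(h):
        # Penalize already-loaded hosts so requests spread out.
        return (w_time * h.expected_response_ms * (1 + h.load)
                + w_energy * h.watts_per_request)
    best = min(hosts, key=cost)
    best.load += 1
    return best

hosts = [Host("vm-a", 12.0, 3.5), Host("vm-b", 9.0, 5.0), Host("edge-c", 20.0, 1.2)]
for _ in range(6):
    print(pick_host(hosts).name)
```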
Handling the load for cloud-based IoT architectures with the above-described method significantly improves the response time of the IoT network by 60 %. The proposed scheme has less energy consumption (31 %), less execution time (24 %), decreased node shutdown time (45 %), and less infrastructure cost (48 %) in comparison to existing frameworks. Based on the simulation results, it is concluded that the proposed framework offers an improved solution for IoT-based cloud load-balancing issues.}, } @article {pmid38053722, year = {2023}, author = {Tang, R and Aridas, NK and Talip, MSA}, title = {Design of a data processing method for the farmland environmental monitoring based on improved Spark components.}, journal = {Frontiers in big data}, volume = {6}, number = {}, pages = {1282352}, pmid = {38053722}, issn = {2624-909X}, abstract = {With the popularization of big data technology, agricultural data processing systems have become more intelligent. In this study, a data processing method for farmland environmental monitoring based on improved Spark components is designed. It introduces the FAST-Join (Join critical filtering sampling partition optimization) algorithm in the Spark component for equivalence association query optimization to improve the operating efficiency of the Spark component and cluster. The experimental results show that the amounts of data written and read in Shuffle by Spark optimized by the FAST-Join algorithm account for only 0.958 % and 1.384 % of the original data volume on average, and the calculation speed is 202.11 % faster than the original. The average data processing time and occupied memory size of the Spark cluster are reduced by 128.22 % and 76.75 % compared with the originals. The study also compared the cluster performance of the FAST-Join and Equi-join algorithms. The Spark cluster optimized by the FAST-Join algorithm reduced the processing time and occupied memory size by an average of 68.74 % and 37.80 % compared with the Equi-join algorithm, which shows that the FAST-Join algorithm can effectively improve the efficiency of inter-data table querying and cluster computing.}, } @article {pmid38052579, year = {2023}, author = {Yu, L and Zhang, Z and Lai, Y and Zhao, Y and Mo, F}, title = {Edge computing-based intelligent monitoring system for manhole cover.}, journal = {Mathematical biosciences and engineering : MBE}, volume = {20}, number = {10}, pages = {18792-18819}, doi = {10.3934/mbe.2023833}, pmid = {38052579}, issn = {1551-0018}, abstract = {Unusual states of manhole covers (MCs), such as being tilted, lost or flooded, can present substantial safety hazards and risks to pedestrians and vehicles on the roadway. Most MCs are still being managed through regular manual inspections and have limited information technology integration. This leads to time-consuming and labor-intensive identification with a lower level of accuracy. In this paper, we propose an edge computing-based intelligent monitoring system for manhole covers (EC-MCIMS). Sensors detect the MC and send status and positioning information via LoRa to the edge gateway located on the nearby wisdom pole. The edge gateway utilizes a lightweight machine learning model, trained on the edge impulse (EI) platform, which can predict the state of the MC.
Simultaneously, the information is uploaded to the cloud platform, enabling remote maintenance personnel to promptly repair and restore it. Tests were performed on the EI platform and in Dongguan townships, demonstrating that the average response time for identifying MCs is 4.81 s. Higher responsiveness and lower power consumption were obtained compared to cloud computing models. Moreover, the system utilizes a lightweight model that reduces read-only memory (ROM) and random-access memory (RAM) usage while maintaining an average identification accuracy of 94%.}, } @article {pmid38049547, year = {2023}, author = {Parashar, D and Kumar, A and Palni, S and Pandey, A and Singh, A and Singh, AP}, title = {Use of machine learning-based classification algorithms in the monitoring of Land Use and Land Cover practices in a hilly terrain.}, journal = {Environmental monitoring and assessment}, volume = {196}, number = {1}, pages = {8}, pmid = {38049547}, issn = {1573-2959}, mesh = {Cities ; *Environmental Monitoring/methods ; *Hot Temperature ; Algorithms ; Support Vector Machine ; }, abstract = {The current high rate of urbanization in developing countries and its consequences, like traffic congestion, slum development, scarcity of resources, and urban heat islands, raise a need for better Land Use Land Cover (LULC) classification mapping for improved planning. This study mainly deals with two objectives: 1) to explore the applicability of machine learning-based techniques, especially the Random Forest (RF) algorithm and the Support Vector Machine (SVM) algorithm, as potential classifiers for LULC mapping under different scenarios, and 2) to prepare a better LULC classification model for mountain terrain by using different indices in combination with spectral bands. Due to differences in topography, shadows, spectral confusion from overlapping spectral signatures of different land cover types, and a lack of access for ground verification, classification in mountainous terrain is a more difficult task than classification of plain terrain. An enhanced LULC classification model has been designed using two popular machine learning (ML) classifier algorithms, SVM and RF, explicitly for mountainous terrains, taking as the study area Gopeshwer town in the Chamoli district of Uttarakhand state, India. The cloud platform Google Earth Engine (GEE) was used for overall processing. Four classification models were built using Sentinel-2B satellite imagery with 20 m and 10 m resolutions. Two of these models (Model 'i', based on the RF algorithm, and Model 'ii', based on the SVM algorithm) were designed using spectral bands of visible and infrared wavelengths, and the other two (Model 'iii', based on the RF algorithm, and Model 'iv', based on the SVM algorithm) with indices added to the spectral bands. The accuracy assessment was done using the confusion matrix based on the output results. The obtained results highlight that the overall accuracies for model 'i' and model 'ii' were 82% and 86%, respectively, whereas they were 87.17% and 87.2% for model 'iii' and model 'iv', respectively. Finally, the study compared the performance of each model based on different accuracy metrics for better LULC mapping.
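A minimal scikit-learn sketch of that comparison design (RF vs. SVM, bands only vs. bands plus an index) on synthetic pixel data follows; the study itself ran in GEE on Sentinel-2B imagery, so everything here is illustrative.

```python
import numpy as np
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC

rng = np.random.default_rng(42)

# Synthetic stand-in for pixels: columns = [red, nir, swir], three classes.
X = rng.uniform(0.0, 1.0, size=(1500, 3))
y = ((X[:, 1] - X[:, 0]) > 0.1).astype(int) + (X[:, 2] > 0.7).astype(int)

# Models 'iii'/'iv' add spectral indices (here NDVI) to the raw bands.
ndvi = (X[:, 1] - X[:, 0]) / (X[:, 1] + X[:, 0] + 1e-9)
X_idx = np.column_stack([X, ndvi])

for label, feats in [("bands only", X), ("bands + NDVI", X_idx)]:
    Xtr, Xte, ytr, yte = train_test_split(feats, y, random_state=0)
    for model in (RandomForestClassifier(n_estimators=200, random_state=0),
                  SVC(kernel="rbf")):
        model.fit(Xtr, ytr)
        acc = accuracy_score(yte, model.predict(Xte))
        print(f"{label:>13} | {type(model).__name__:<22} acc={acc:.3f}")
```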
The study proposes an improved LULC classification model for mountainous terrains, which can contribute to better land management and planning in the study area.}, } @article {pmid38046398, year = {2023}, author = {Babar, M and Ahmad Jan, M and He, X and Usman Tariq, M and Mastorakis, S and Alturki, R}, title = {An Optimized IoT-enabled Big Data Analytics Architecture for Edge-Cloud Computing.}, journal = {IEEE internet of things journal}, volume = {10}, number = {5}, pages = {3995-4005}, pmid = {38046398}, issn = {2327-4662}, support = {P20 GM109090/GM/NIGMS NIH HHS/United States ; }, abstract = {Edge computing is attaining prominence and is widely acknowledged with the rise of the Internet of Things (IoT). Edge-enabled solutions offer efficient computing and control at the network edge to resolve scalability and latency-related concerns. However, it is challenging for edge computing to tackle the diverse applications of IoT, as they produce massive heterogeneous data. IoT-enabled frameworks for Big Data analytics face numerous challenges in their existing structural design, for instance, the high volume of data storage and processing, data heterogeneity, and processing time, among others. Moreover, the existing proposals lack effective parallel data loading and robust mechanisms for handling communication overhead. To address these challenges, we propose an optimized IoT-enabled big data analytics architecture for edge-cloud computing using machine learning. In the proposed scheme, an edge intelligence module is introduced to process and store the big data efficiently at the edges of the network with the integration of cloud technology. The proposed scheme is composed of two layers: IoT-edge and Cloud-processing. Data ingestion and storage are carried out with an optimized MapReduce parallel algorithm. An optimized Yet Another Resource Negotiator (YARN) is used for efficiently managing the cluster. The proposed data design is experimentally simulated with an authentic dataset using Apache Spark. A comparative analysis is conducted against existing proposals and traditional mechanisms. The results justify the efficiency of our proposed work.
The authors provide a general framework to understand the environmental impact of clinical radiology informatics, which includes using the international Greenhouse Gas Protocol to draft a definition of scopes of emissions pertinent to radiology informatics, as well as exploring existing tools to measure and account for these emissions. A novel standard ecolabel for radiology informatics tools, such as the Energy Star label for consumer devices or Leadership in Energy and Environmental Design certification for buildings, should be developed to promote awareness and guide radiologists and radiology informatics leaders in making environmentally conscious decisions for their clinical practice. At this critical climate juncture, the radiology community has a unique and pressing obligation to consider our shared environmental responsibility in innovating clinical technology for patient care.}, } @article {pmid38042609, year = {2023}, author = {Shaikh, TA and Rasool, T and Verma, P}, title = {Machine intelligence and medical cyber-physical system architectures for smart healthcare: Taxonomy, challenges, opportunities, and possible solutions.}, journal = {Artificial intelligence in medicine}, volume = {146}, number = {}, pages = {102692}, doi = {10.1016/j.artmed.2023.102692}, pmid = {38042609}, issn = {1873-2860}, mesh = {Humans ; *Artificial Intelligence ; *Computer Security ; Delivery of Health Care ; Cloud Computing ; }, abstract = {Hospitals increasingly use medical cyber-physical systems (MCPS) to give patients quality continuous care. An MCPS is a life-critical, context-aware, networked system of medical equipment. It has been challenging to achieve high assurance in system software, interoperability, context-aware intelligence, autonomy, security and privacy, and device certifiability due to the necessity to create complicated MCPS that are safe and efficient. The MCPS system is shown in the paper as a newly developed application case study of artificial intelligence in healthcare. Applications for various CPS-based healthcare systems are discussed, such as telehealthcare systems for managing chronic diseases (cardiovascular diseases, epilepsy, hearing loss, and respiratory diseases), supporting medication intake management, and tele-homecare systems. The goal of this study is to provide a thorough overview of the essential components of the MCPS from several angles, including design, methodology, and important enabling technologies, including sensor networks, the Internet of Things (IoT), cloud computing, and multi-agent systems. Additionally, some significant applications are investigated, such as smart cities, which are regarded as one of the key applications that will offer new services for industrial systems, transportation networks, energy distribution, monitoring of environmental changes, business and commerce applications, emergency response, and other social and recreational activities. The four levels of an MCPS's general architecture (data collection, data aggregation, cloud processing, and action) are shown in this study. Different encryption techniques must be employed to ensure data privacy inside each layer due to the variations in hardware and communication capabilities of each layer. We compare established and new encryption techniques based on how well they support safe data exchange, secure computing, and secure storage.
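As a toy illustration of that layer-wise encryption idea, here is a sketch using the `cryptography` package; the one-key-per-layer mapping is illustrative only, since a real MCPS would also vary the cipher with each layer's hardware budget.

```python
from cryptography.fernet import Fernet

# One key per architectural layer; a real MCPS would also vary the
# cipher itself with each layer's hardware and bandwidth budget.
layer_keys = {layer: Fernet.generate_key()
              for layer in ("collection", "aggregation", "cloud", "action")}

def encrypt_at_layer(layer, payload):
    return Fernet(layer_keys[layer]).encrypt(payload)

def decrypt_at_layer(layer, token):
    return Fernet(layer_keys[layer]).decrypt(token)

reading = b'{"sensor": "spo2", "value": 97}'
token = encrypt_at_layer("collection", reading)
print(decrypt_at_layer("collection", token) == reading)  # True
```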
Our thorough experimental study of each method reveals that, although they enable innovative new features like secure sharing and secure computing, encryption approaches under development significantly increase computational and storage overhead. To increase the usability of newly developed encryption schemes in an MCPS and to provide a comprehensive list of tools and databases to assist other researchers, we conclude the paper with a list of opportunities and challenges for incorporating machine intelligence-based MCPS in healthcare applications.}, } @article {pmid38039654, year = {2024}, author = {Chen, X and Li, J and Chen, D and Zhou, Y and Tu, Z and Lin, M and Kang, T and Lin, J and Gong, T and Zhu, L and Zhou, J and Lin, OY and Guo, J and Dong, J and Guo, D and Qu, X}, title = {CloudBrain-MRS: An intelligent cloud computing platform for in vivo magnetic resonance spectroscopy preprocessing, quantification, and analysis.}, journal = {Journal of magnetic resonance (San Diego, Calif. : 1997)}, volume = {358}, number = {}, pages = {107601}, doi = {10.1016/j.jmr.2023.107601}, pmid = {38039654}, issn = {1096-0856}, mesh = {Humans ; *Cloud Computing ; *Artificial Intelligence ; Magnetic Resonance Spectroscopy/methods ; Magnetic Resonance Imaging/methods ; Software ; }, abstract = {Magnetic resonance spectroscopy (MRS) is an important clinical imaging method for the diagnosis of diseases. The MRS spectrum is used to observe the signal intensity of metabolites or further infer their concentrations. Although magnetic resonance vendors commonly provide basic functions for spectrum plotting and metabolite quantification, the spread of clinical MRS research is still limited by the lack of easy-to-use processing software or platforms. To address this issue, we have developed CloudBrain-MRS, a cloud-based online platform that provides powerful hardware and advanced algorithms. The platform can be accessed simply through a web browser, without the need for any program installation on the user side. CloudBrain-MRS also integrates the classic LCModel and advanced artificial intelligence algorithms and supports batch preprocessing, quantification, and analysis of MRS data from different vendors. Additionally, the platform offers useful functions: (1) automatic statistical analysis to find biomarkers for diseases; (2) consistency verification between the classic and artificial intelligence quantification algorithms; and (3) colorful three-dimensional visualization for easy observation of individual metabolite spectra. Finally, data from both healthy subjects and patients with mild cognitive impairment are used to demonstrate the functions of the platform. To the best of our knowledge, this is the first cloud computing platform for in vivo MRS with artificial intelligence processing. We have shared our cloud platform at MRSHub, providing at least two years of free access and service.
If you are interested, please visit https://mrshub.org/software_all/#CloudBrain-MRS or https://csrc.xmu.edu.cn/CloudBrain.html.}, } @article {pmid38035280, year = {2023}, author = {Zhao, K and Farrell, K and Mashiku, M and Abay, D and Tang, K and Oberste, MS and Burns, CC}, title = {A search-based geographic metadata curation pipeline to refine sequencing institution information and support public health.}, journal = {Frontiers in public health}, volume = {11}, number = {}, pages = {1254976}, pmid = {38035280}, issn = {2296-2565}, mesh = {*Metadata ; *Public Health ; High-Throughput Nucleotide Sequencing ; China ; United Kingdom ; }, abstract = {BACKGROUND: The National Center for Biotechnology Information (NCBI) Sequence Read Archive (SRA) has amassed a vast reservoir of genetic data since its inception in 2007. These public data hold immense potential for supporting pathogen surveillance and control. However, the lack of standardized metadata and inconsistent submission practices in SRA may impede the data's utility in public health.

METHODS: To address this issue, we introduce the Search-based Geographic Metadata Curation (SGMC) pipeline. SGMC utilized Python and web scraping to extract geographic data of sequencing institutions from NCBI SRA in the Cloud and its website. It then harnessed ChatGPT to refine the sequencing institution and location assignments. To illustrate the pipeline's utility, we examined the geographic distribution of the sequencing institutions and their countries relevant to polio eradication and categorized them.
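The pipeline itself combines web scraping with ChatGPT; as a minimal sketch of the curation step alone, a rule-based stand-in with a hypothetical institution-to-country lookup might look like this:

```python
import re

# Hypothetical lookup from normalized institution names to countries;
# SGMC derives such assignments with web search results plus an LLM.
KNOWN_INSTITUTIONS = {
    "centers for disease control and prevention": "United States",
    "wellcome sanger institute": "United Kingdom",
}

def normalize(raw):
    name = raw.lower()
    name = re.sub(r"[^a-z ]", " ", name)       # drop punctuation/digits
    return re.sub(r"\s+", " ", name).strip()   # collapse whitespace

def assign_country(raw_center_field):
    name = normalize(raw_center_field)
    return name, KNOWN_INSTITUTIONS.get(name, "unresolved")

print(assign_country("Wellcome  Sanger Institute,"))
```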

RESULTS: SGMC successfully identified 7,649 sequencing institutions and their global locations from a random selection of 2,321,044 SRA accessions. These institutions were distributed across 97 countries, with strong representation in the United States, the United Kingdom and China. However, there was a lack of data from African, Central Asian, and Central American countries, indicating potential disparities in sequencing capabilities. Comparison with manually curated data for U.S. institutions reveals SGMC's accuracy rates of 94.8% for institutions, 93.1% for countries, and 74.5% for geographic coordinates.

CONCLUSION: SGMC may represent a novel approach using a generative AI model to enhance geographic data (country and institution assignments) for large numbers of samples within SRA datasets. This information can be utilized to bolster public health endeavors.}, } @article {pmid38035195, year = {2023}, author = {Olson, RH and Cohen Kalafut, N and Wang, D}, title = {MANGEM: A web app for multimodal analysis of neuronal gene expression, electrophysiology, and morphology.}, journal = {Patterns (New York, N.Y.)}, volume = {4}, number = {11}, pages = {100847}, pmid = {38035195}, issn = {2666-3899}, support = {P50 HD105353/HD/NICHD NIH HHS/United States ; R01 AG067025/AG/NIA NIH HHS/United States ; RF1 MH128695/MH/NIMH NIH HHS/United States ; }, abstract = {Single-cell techniques like Patch-seq have enabled the acquisition of multimodal data from individual neuronal cells, offering systematic insights into neuronal functions. However, these data can be heterogeneous and noisy. To address this, machine learning methods have been used to align cells from different modalities onto a low-dimensional latent space, revealing multimodal cell clusters. The use of those methods can be challenging without computational expertise or suitable computing infrastructure for computationally expensive methods. To address this, we developed a cloud-based web application, MANGEM (multimodal analysis of neuronal gene expression, electrophysiology, and morphology). MANGEM provides a step-by-step accessible and user-friendly interface to machine learning alignment methods of neuronal multimodal data. It can run asynchronously for large-scale data alignment, provide users with various downstream analyses of aligned cells, and visualize the analytic results. We demonstrated the usage of MANGEM by aligning multimodal data of neuronal cells in the mouse visual cortex.}, } @article {pmid38027905, year = {2023}, author = {Ait Abdelmoula, I and Idrissi Kaitouni, S and Lamrini, N and Jbene, M and Ghennioui, A and Mehdary, A and El Aroussi, M}, title = {Towards a sustainable edge computing framework for condition monitoring in decentralized photovoltaic systems.}, journal = {Heliyon}, volume = {9}, number = {11}, pages = {e21475}, pmid = {38027905}, issn = {2405-8440}, abstract = {In recent times, the rapid advancements in technology have led to a digital revolution in urban areas, and new computing frameworks are emerging to address the current issues in monitoring and fault detection, particularly in the context of the growing renewable decentralized energy systems. This research proposes a novel framework for monitoring the condition of decentralized photovoltaic systems within a smart city infrastructure. The approach uses edge computing to overcome the challenges associated with costly processing through remote cloud servers. By processing data at the edge of the network, this concept allows for significant gains in speed and bandwidth consumption, making it suitable for a sustainable city environment. In the proposed edge-learning scheme, several machine learning models are compared to find the best suitable model achieving both high accuracy and low latency in detecting photovoltaic faults. Four light and rapid machine learning models, namely, CBLOF, LOF, KNN, ANN, are selected as best performers and trained locally in decentralized edge nodes. The overall approach is deployed in a smart solar campus with multiple distributed PV units located in the R&D platform Green & Smart Building Park. 
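The evaluation loop described for this edge-learning scheme (several detectors compared on f1-score and inference time) can be sketched with scikit-learn stand-ins; LocalOutlierFactor and IsolationForest below substitute for the paper's CBLOF/LOF/KNN/ANN models, and the telemetry is synthetic.

```python
import time
import numpy as np
from sklearn.ensemble import IsolationForest
from sklearn.metrics import f1_score
from sklearn.neighbors import LocalOutlierFactor

rng = np.random.default_rng(1)
X_train = rng.normal(0, 1, (1000, 8))                 # normal PV telemetry
X_test = np.vstack([rng.normal(0, 1, (180, 8)),       # normal windows
                    rng.normal(4, 1, (20, 8))])       # injected faults
y_test = np.array([1] * 180 + [-1] * 20)              # -1 marks anomalies

models = {
    "LOF": LocalOutlierFactor(novelty=True),
    "IsolationForest": IsolationForest(random_state=0),
}
for name, model in models.items():
    model.fit(X_train)
    t0 = time.perf_counter()
    pred = model.predict(X_test)                      # +1 normal, -1 anomaly
    ms_per_sample = (time.perf_counter() - t0) * 1e3 / len(X_test)
    print(f"{name:<16} f1={f1_score(y_test, pred, pos_label=-1):.2f} "
          f"latency={ms_per_sample:.3f} ms/sample")
```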
Several experiments were conducted on different anomaly scenarios, and the models were evaluated based on their supervision method, f1-score, inference time, RAM usage, and model size. The paper also investigates the impact of the type of supervision and the class of the model on the anomaly detection performance. The findings indicated that the supervised artificial neural network (ANN) had superior performance compared to other models, obtaining an f1-score of 80 % even in the most unfavorable conditions. The findings also showed that KNN was the most suitable unsupervised model for the investigated experiments achieving good f1-scores (100 %, 95 % and 92 %) in 3 out of 4 scenarios making it a good candidate for similar anomaly detection tasks.}, } @article {pmid38027596, year = {2023}, author = {Mohammed, MA and Lakhan, A and Abdulkareem, KH and Khanapi Abd Ghani, M and Abdulameer Marhoon, H and Nedoma, J and Martinek, R}, title = {Multi-objectives reinforcement federated learning blockchain enabled Internet of things and Fog-Cloud infrastructure for transport data.}, journal = {Heliyon}, volume = {9}, number = {11}, pages = {e21639}, doi = {10.1016/j.heliyon.2023.e21639}, pmid = {38027596}, issn = {2405-8440}, abstract = {For the past decade, there has been a significant increase in customer usage of public transport applications in smart cities. These applications rely on various services, such as communication and computation, provided by additional nodes within the smart city environment. However, these services are delivered by a diverse range of cloud computing-based servers that are widely spread and heterogeneous, leading to cybersecurity becoming a crucial challenge among these servers. Numerous machine-learning approaches have been proposed in the literature to address the cybersecurity challenges in heterogeneous transport applications within smart cities. However, the centralized security and scheduling strategies suggested so far have yet to produce optimal results for transport applications. This work aims to present a secure decentralized infrastructure for transporting data in fog cloud networks. This paper introduces Multi-Objectives Reinforcement Federated Learning Blockchain (MORFLB) for Transport Infrastructure. MORFLB aims to minimize processing and transfer delays while maximizing long-term rewards by identifying known and unknown attacks on remote sensing data in-vehicle applications. MORFLB incorporates multi-agent policies, proof-of-work hashing validation, and decentralized deep neural network training to achieve minimal processing and transfer delays. It comprises vehicle applications, decentralized fog, and cloud nodes based on blockchain reinforcement federated learning, which improves rewards through trial and error. The study formulates a combinatorial problem that minimizes and maximizes various factors for vehicle applications. The experimental results demonstrate that MORFLB effectively reduces processing and transfer delays while maximizing rewards compared to existing studies. It provides a promising solution to address the cybersecurity challenges in intelligent transport applications within smart cities. 
In conclusion, this paper presents MORFLB, a combination of different schemes that ensure the execution of transport data under their constraints and achieve optimal results with the suggested decentralized infrastructure based on blockchain technology.}, } @article {pmid38027579, year = {2023}, author = {Guo, LL and Calligan, M and Vettese, E and Cook, S and Gagnidze, G and Han, O and Inoue, J and Lemmon, J and Li, J and Roshdi, M and Sadovy, B and Wallace, S and Sung, L}, title = {Development and validation of the SickKids Enterprise-wide Data in Azure Repository (SEDAR).}, journal = {Heliyon}, volume = {9}, number = {11}, pages = {e21586}, pmid = {38027579}, issn = {2405-8440}, abstract = {OBJECTIVES: To describe the processes developed by The Hospital for Sick Children (SickKids) to enable utilization of electronic health record (EHR) data by creating sequentially transformed schemas for use across multiple user types.

METHODS: We used Microsoft Azure as the cloud service provider and named this effort the SickKids Enterprise-wide Data in Azure Repository (SEDAR). Epic Clarity data from on-premises was copied to a virtual network in Microsoft Azure. Three sequential schemas were developed. The Filtered Schema added a filter to retain only SickKids and valid patients. The Curated Schema created a data structure that was easier to navigate and query. Each table contained a logical unit such as patients, hospital encounters or laboratory tests. Data validation of randomly sampled observations in the Curated Schema was performed. The SK-OMOP Schema was designed to facilitate research and machine learning. Two individuals mapped medical elements to standard Observational Medical Outcomes Partnership (OMOP) concepts.
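The actual schemas are implemented as nightly SQL stored procedures in Azure; a pandas sketch of the Filtered-to-Curated idea, with hypothetical column names, is given below.

```python
import pandas as pd

# Hypothetical extract of raw Clarity-like rows (column names illustrative).
raw = pd.DataFrame({
    "patient_id": [1, 2, 2, 3],
    "site": ["SK", "SK", "SK", "OTHER"],
    "is_test_patient": [False, False, False, True],
    "encounter_type": ["inpatient", "lab", "inpatient", "lab"],
})

# Filtered Schema: retain only SickKids rows and valid (non-test) patients.
filtered = raw[(raw["site"] == "SK") & ~raw["is_test_patient"]]

# Curated Schema: one logical unit per table (patients, encounters, ...).
curated = {
    "patients": filtered[["patient_id"]].drop_duplicates(),
    "hospital_encounters": filtered[filtered["encounter_type"] == "inpatient"],
}
print({name: len(table) for name, table in curated.items()})
```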

RESULTS: A copy of Clarity data was transferred to Microsoft Azure and updated each night using log shipping. The Filtered Schema and Curated Schema were implemented as stored procedures and executed each night with incremental updates or full loads. Data validation required up to 16 iterations for each Curated Schema table. OMOP concept mapping achieved at least 80 % coverage for each SK-OMOP table.

CONCLUSIONS: We described our experience in creating three sequential schemas to address different EHR data access requirements. Future work should consider replicating this approach at other institutions to determine whether it is generalizable.}, } @article {pmid38022923, year = {2023}, author = {Han, J and Sun, R and Zeeshan, M and Rehman, A and Ullah, I}, title = {The impact of digital transformation on green total factor productivity of heavily polluting enterprises.}, journal = {Frontiers in psychology}, volume = {14}, number = {}, pages = {1265391}, pmid = {38022923}, issn = {1664-1078}, abstract = {INTRODUCTION: Digital transformation has become an important engine for high-quality economic development and high-level environmental protection. However, although green total factor productivity (GTFP) is an indicator that comprehensively reflects economic and environmental benefits, there is a lack of studies analyzing the effect of digital transformation on heavily polluting enterprises' GTFP from a micro perspective, and its impact mechanism is still unclear. Therefore, we aim to study the impact of digital transformation on heavily polluting enterprises' GTFP and its mechanism, and to explore the heterogeneity of its impact.

METHODS: We use data on Chinese A-share listed enterprises in the heavily polluting industry from 2007 to 2019, measure an enterprise digital transformation indicator using text analysis, and measure an enterprise GTFP indicator using the GML index based on the SBM directional distance function, to investigate the impact of digital transformation on heavily polluting enterprises' GTFP.
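A toy version of such a text-analysis indicator (count digital-transformation keywords in an annual report and normalize by document length) could look like the following; the keyword list and scaling are illustrative, not the paper's dictionary.

```python
import re

# Illustrative keyword dictionary; studies of this kind count terms like
# these in annual reports to proxy the depth of digital transformation.
KEYWORDS = ["artificial intelligence", "cloud computing", "big data",
            "blockchain", "digital technology"]

def dt_indicator(report_text):
    text = report_text.lower()
    hits = sum(len(re.findall(re.escape(k), text)) for k in KEYWORDS)
    words = max(len(text.split()), 1)
    return 10000 * hits / words     # keyword frequency per 10k words

sample = ("The company expanded its cloud computing platform and applied "
          "big data and artificial intelligence to production control.")
print(f"{dt_indicator(sample):.1f}")
```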

RESULTS: Digital transformation can significantly enhance heavily polluting enterprises' GTFP, and this finding still holds after considering endogeneity problems and conducting robustness tests. Digital transformation can enhance heavily polluting enterprises' GTFP by promoting green innovation, improving management efficiency, and reducing external transaction costs. The improving effect of digital transformation on heavily polluting enterprises' GTFP is more pronounced in the samples of non-state-owned enterprises, non-high-tech industries, and the eastern region. Compared with blockchain technology, the application of artificial intelligence, cloud computing, big data, and digital technologies can significantly improve heavily polluting enterprises' GTFP.

DISCUSSION: Our paper breaks through the limitations of existing research, which not only theoretically enriches the literature related to digital transformation and GTFP, but also practically provides policy implications for continuously promoting heavily polluting enterprises' digital transformation and facilitating their high-quality development.}, } @article {pmid38020047, year = {2023}, author = {Ko, HYK and Tripathi, NK and Mozumder, C and Muengtaweepongsa, S and Pal, I}, title = {Real-Time Remote Patient Monitoring and Alarming System for Noncommunicable Lifestyle Diseases.}, journal = {International journal of telemedicine and applications}, volume = {2023}, number = {}, pages = {9965226}, pmid = {38020047}, issn = {1687-6415}, abstract = {Telemedicine and remote patient monitoring (RPM) systems have been gaining interest and adoption in healthcare sectors since the COVID-19 pandemic due to their efficiency and capability to deliver timely healthcare services while containing COVID-19 transmission. These systems were developed using the latest technology in wireless sensors, medical devices, cloud computing, mobile computing, telecommunications, and machine learning technologies. In this article, a real-time remote patient monitoring system is proposed with an accessible, compact, accurate, and low-cost design. The implemented system is designed as an end-to-end communication interface between medical practitioners and patients. The objective of this study is to provide remote healthcare services to patients who need ongoing care or those who have been discharged from the hospital without affecting their daily routines. The developed monitoring system was then evaluated on 1177 records from the MIMIC-III clinical dataset (patients aged between 19 and 99 years). The performance analysis of the proposed system achieved 88.7% accuracy in generating alerts with a logistic regression classification algorithm. This result reflects positively on the quality and robustness of the proposed study. Since the processing time of the proposed system is less than 2 minutes, it can be stated that the system has a high computational speed and is convenient to use in real-time monitoring. Furthermore, the proposed system can help compensate for the low doctor-to-patient ratio by monitoring patients in remote locations and older people who reside at home.}, } @article {pmid38006682, year = {2024}, author = {Geroski, T and Gkaintes, O and Vulović, A and Ukaj, N and Barrasa-Fano, J and Perez-Boerema, F and Milićević, B and Atanasijević, A and Živković, J and Živić, A and Roumpi, M and Exarchos, T and Hellmich, C and Scheiner, S and Van Oosterwyck, H and Jakovljević, D and Ivanović, M and Filipović, N}, title = {SGABU computational platform for multiscale modeling: Bridging the gap between education and research.}, journal = {Computer methods and programs in biomedicine}, volume = {243}, number = {}, pages = {107935}, doi = {10.1016/j.cmpb.2023.107935}, pmid = {38006682}, issn = {1872-7565}, mesh = {*Software ; *User-Computer Interface ; Computer Simulation ; Language ; Workflow ; Computational Biology/methods ; }, abstract = {BACKGROUND AND OBJECTIVE: In accordance with the latest aspirations in the field of bioengineering, there is a need to create a web-accessible but powerful cloud computational platform that combines datasets and multiscale models related to bone modeling, cancer, cardiovascular diseases and tissue engineering.
The SGABU platform may become a powerful information system for research and education that can integrate data, extract information, and facilitate knowledge exchange with the goal of creating and developing appropriate computing pipelines to provide accurate and comprehensive biological information from the molecular to organ level.

METHODS: The datasets integrated into the platform are obtained from experimental and/or clinical studies and are mainly in tabular or image file format, including metadata. The implementation of multiscale models is an ambitious effort of the platform to capture phenomena at different length scales; these are described using partial and ordinary differential equations, which are solved numerically on complex geometries using the finite element method. The majority of the SGABU platform's simulation pipelines are provided as Common Workflow Language (CWL) workflows. Each of them requires creating a CWL implementation on the backend and a user-friendly interface using standard web technologies. The platform is available at https://sgabu-test.unic.kg.ac.rs/login.
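Since each pipeline is wrapped as CWL, invoking one from Python with the reference cwltool runner might look like the following sketch; the workflow and input file names are hypothetical, not SGABU's actual backend files.

```python
import json
import subprocess
import tempfile

# Hypothetical workflow inputs; SGABU's actual CWL files live on its backend.
inputs = {"mesh_file": {"class": "File", "path": "femur_geometry.vtk"},
          "youngs_modulus": 17e9}

with tempfile.NamedTemporaryFile("w", suffix=".json", delete=False) as fh:
    json.dump(inputs, fh)
    job_path = fh.name

# cwltool is the reference CWL runner; each SGABU pipeline is one workflow.
subprocess.run(["cwltool", "bone_model_workflow.cwl", job_path], check=True)
```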

RESULTS: The main dashboard of the SGABU platform is divided into sections for each field of research, each of which includes subsections of datasets and multiscale models. The datasets can be presented in simple tabular form or using technologies such as Plotly.js for 2D plot interactivity and Kitware ParaView Glance for 3D views. Regarding the models, Docker containerization is used to package the individual tools, and CWL orchestration describes inputs with validation forms and outputs with tabular views, interactive diagrams, 3D views, and animations for output visualization.

CONCLUSIONS: In practice, the structure of the SGABU platform means that any of the integrated workflows can work equally well on any other bioengineering platform. The key advantage of the SGABU platform over similar efforts is its versatility, achieved through the use of modern, modular, and extensible technology at various levels of its architecture.}, } @article {pmid38005614, year = {2023}, author = {Zhang, T and Jin, X and Bai, S and Peng, Y and Li, Y and Zhang, J}, title = {Smart Public Transportation Sensing: Enhancing Perception and Data Management for Efficient and Safety Operations.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {22}, pages = {}, pmid = {38005614}, issn = {1424-8220}, support = {No. KCXST20221021111201002//Science and Technology Innovation Committee of Shenzhen/ ; }, abstract = {The use of cloud computing, big data, IoT, and mobile applications in the public transportation industry has resulted in the generation of vast and complex data, whose large volume and variety pose several obstacles to efficient data sensing and processing in a real-time, data-driven public transportation management system. To overcome the above-mentioned challenges and to guarantee optimal data availability for data sensing and processing in public transportation perception, a public transportation sensing platform is proposed to collect, integrate, and organize diverse data from different data sources. The proposed data perception platform connects multiple data systems and edge intelligent perception devices to enable the collection of various types of data, including passenger travel information and smart-card transaction data. To enable the efficient extraction of precise and detailed traveling behavior, an efficient field-level data lineage exploration method is proposed during logical plan generation and is integrated seamlessly into the FlinkSQL system. Furthermore, a row-level fine-grained permission control mechanism is adopted to support flexible data management. With these two techniques, the proposed data management system supports efficient processing of large amounts of data and comprehensive analysis and application of business data from numerous sources, realizing the value of the data with high data safety. Through operational testing in real environments, the proposed platform has proven highly efficient and effective in managing organizational operations, data assets, the data life cycle, offline development, and backend administration over large amounts of various types of public transportation traffic data.}, } @article {pmid38005586, year = {2023}, author = {Nugroho, AK and Shioda, S and Kim, T}, title = {Optimal Resource Provisioning and Task Offloading for Network-Aware and Federated Edge Computing.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {22}, pages = {}, pmid = {38005586}, issn = {1424-8220}, support = {2021R1F1A1059109//National Research Foundation of Korea/ ; Research Grant, 2022//Pusan National University/ ; }, abstract = {Compared to cloud computing, mobile edge computing (MEC) is a promising solution for delay-sensitive applications due to its proximity to end users. Because of its ability to offload resource-intensive tasks to nearby edge servers, MEC allows a diverse range of compute- and storage-intensive applications to operate on resource-constrained devices.
The optimal utilization of MEC can lead to enhanced responsiveness and quality of service, but it requires careful design from the perspective of user-base station association, virtualized resource provisioning, and task distribution. Also, considering the limited exploration of the federation concept in the existing literature, its impacts on the allocation and management of resources remain largely unrecognized. In this paper, we study the network and MEC resource scheduling problem, where some edge servers are federated, limiting resource expansion within the same federations. The integration of network and MEC is crucial, emphasizing the necessity of a joint approach. In this work, we present NAFEOS, a solution formulated as a two-stage algorithm that can effectively integrate association optimization with vertical and horizontal scaling. The Stage-1 problem optimizes the user-base station association and federation assignment so that the edge servers can be utilized in a balanced manner. The following Stage-2 dynamically schedules both vertical and horizontal scaling so that the fluctuating task-offloading demands from users are fulfilled. The extensive evaluations and comparison results show that the proposed approach can effectively achieve optimal resource utilization.}, } @article {pmid38005558, year = {2023}, author = {Oliveira, M and Chauhan, S and Pereira, F and Felgueiras, C and Carvalho, D}, title = {Blockchain Protocols and Edge Computing Targeting Industry 5.0 Needs.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {22}, pages = {}, pmid = {38005558}, issn = {1424-8220}, abstract = {"Industry 5.0" is the latest industrial revolution. A variety of cutting-edge technologies, including artificial intelligence, the Internet of Things (IoT), and others, come together to form it. Billions of devices are connected for high-speed data transfer, especially in a 5G-enabled industrial environment for information collection and processing. Most of the issues, such as access control mechanisms, the time to fetch data from different devices, and the protocols used, may not be applicable in the future, as these protocols are based upon a centralized mechanism. This centralized mechanism may have a single point of failure along with computational overhead. Thus, there is a need for an efficient decentralized access control mechanism for device-to-device (D2D) communication in various industrial sectors, where, for example, sensors in different regions may collect and process data for making intelligent decisions. In such an environment, reliability, security, and privacy are major concerns, as most of the solutions are based upon a centralized control mechanism. To mitigate the aforementioned issues, this paper presents the opportunities and highlights some of the most impressive initiatives that help to shape the future. This new era will bring about significant changes in the way businesses operate, allowing them to become more cost-effective, more efficient, and produce higher-quality goods and services. As sensors become more accurate and cheaper and achieve lower response times, 5G networks are being integrated, and more industrial equipment and machinery are becoming available; hence, various sectors, including the manufacturing sector, are going through a significant period of transition right now.
Additionally, the emergence of the cloud enables modern production models that use the cloud (both internal and external services), networks, and systems to leverage the cloud's low cost, scalability, increased computational power, real-time communication, and data transfer capabilities to create much smarter and more autonomous systems. We discuss the ways in which decentralized networks that make use of these protocols help to achieve decentralization, and how network meshes can grow to make systems more secure and reliable and to cohere with these technologies, which are not going away anytime soon. We emphasize the significance of new design in regard to cybersecurity, data integrity, and storage by using straightforward examples that have the potential to lead to the excellence of distributed systems. This groundbreaking paper delves deep into the world of industrial automation and explores the possibilities of adopting blockchain for developing solutions for smart cities, smart homes, healthcare, smart agriculture, autonomous vehicles, and supply chain management within Industry 5.0. With an in-depth examination of various consensus mechanisms, readers gain a comprehensive understanding of the latest developments in this field. The paper also explores the current issues and challenges associated with blockchain adoption for industrial automation and provides a thorough comparison of the available consensus mechanisms, enabling end customers to select the most suitable one based on its unique advantages. Case studies highlight how to enable the adoption of blockchain in Industry 5.0 solutions effectively and efficiently, offering valuable insights into the potential challenges that lie ahead, particularly for smart industrial applications.}, } @article {pmid38004827, year = {2023}, author = {Kim, J and Koh, H}, title = {MiTree: A Unified Web Cloud Analytic Platform for User-Friendly and Interpretable Microbiome Data Mining Using Tree-Based Methods.}, journal = {Microorganisms}, volume = {11}, number = {11}, pages = {}, pmid = {38004827}, issn = {2076-2607}, support = {2021R1C1C1013861//National Research Foundation of Korea/ ; }, abstract = {The advent of next-generation sequencing has greatly accelerated the field of human microbiome studies. Currently, investigators are seeking, struggling and competing to find new ways to diagnose, treat and prevent human diseases through the human microbiome. Machine learning is a promising approach to help such an effort, especially due to the high complexity of microbiome data. However, many of the current machine learning algorithms are in a "black box", i.e., they are difficult to understand and interpret. In addition, clinicians, public health practitioners and biologists are not usually skilled at computer programming, and they do not always have high-end computing devices. Thus, in this study, we introduce a unified web cloud analytic platform, named MiTree, for user-friendly and interpretable microbiome data mining. MiTree employs tree-based learning methods, including decision tree, random forest and gradient boosting, that are well understood and suited to human microbiome studies. We also stress that MiTree can address both classification and regression problems through covariate-adjusted or unadjusted analysis. MiTree should serve as an easy-to-use and interpretable data mining tool for microbiome-based disease prediction modeling, and should provide new insights into microbiome-based diagnostics, treatment and prevention.
MiTree is open-source software available on our web server.}, } @article {pmid37987882, year = {2023}, author = {Bahadur, FT and Shah, SR and Nidamanuri, RR}, title = {Applications of remote sensing vis-à-vis machine learning in air quality monitoring and modelling: a review.}, journal = {Environmental monitoring and assessment}, volume = {195}, number = {12}, pages = {1502}, pmid = {37987882}, issn = {1573-2959}, mesh = {*Artificial Intelligence ; Remote Sensing Technology ; Environmental Monitoring ; *Air Pollution ; Machine Learning ; }, abstract = {Environmental contamination, especially air pollution, is an exponentially growing menace requiring immediate attention, as it lingers on with the associated risks of health, economic and ecological crises. The special focus of this study is on the advances in Air Quality (AQ) monitoring using modern sensors, integrated monitoring systems, remote sensing and the usage of Machine Learning (ML) and Deep Learning (DL) algorithms, artificial neural networks, recent computational techniques, hybridizing techniques and different platforms available for AQ modelling. The modern world is data-driven: critical decisions are taken based on the available and accessible data. Today's data analytics is a consequence of the information explosion, and current research is re-evaluating its scope accordingly. The emergence of artificial intelligence and machine learning in the research scenario has radically changed the methodologies and approaches of modern research. The aim of this review is to assess the impact of data analytics such as ML/DL frameworks, data integration techniques, advanced statistical modelling, cloud computing platforms and constantly improving optimization algorithms on AQ research. Remote sensing in AQ monitoring, besides providing enormous datasets, is constantly filling the spatial gaps between ground stations, as long-term air pollutant dynamics are best captured by the panoramic view of satellites. Remote sensing coupled with the techniques of ML/DL has had the most impact in shaping modern trends in AQ research. The current standing of research in this field, emerging trends and future scope are also discussed.}, } @article {pmid37979853, year = {2024}, author = {Wilkinson, R and Mleczko, MM and Brewin, RJW and Gaston, KJ and Mueller, M and Shutler, JD and Yan, X and Anderson, K}, title = {Environmental impacts of earth observation data in the constellation and cloud computing era.}, journal = {The Science of the total environment}, volume = {909}, number = {}, pages = {168584}, doi = {10.1016/j.scitotenv.2023.168584}, pmid = {37979853}, issn = {1879-1026}, abstract = {The number of Earth Observation (EO) satellites has increased exponentially over the past decade, reaching a population of 1193 (January 2023). Consequently, EO data volumes have mushroomed and data storage and processing have migrated to the cloud. Whilst attention has been given to the launch and in-orbit environmental impacts of satellites, EO data environmental footprints have been overlooked. These issues require urgent attention given data centre water and energy consumption, high carbon emissions for computer component manufacture, and the difficulty of recycling computer components. Addressing them is essential if the environmental good of EO is to withstand scrutiny.
We provide the first assessment of the EO data life-cycle and estimate that the current size of the global EO data collection is ~807 PB, increasing by ~100 PB/year. Storage of this data volume generates annual CO2 equivalent emissions of 4101 t. Major state-funded EO providers use 57 of their own data centres globally, and a further 178 private cloud services, with considerable duplication of datasets across repositories. We explore scenarios for the environmental cost of performing EO functions on the cloud compared to desktop machines. A simple band arithmetic function applied to a Landsat 9 scene using Google Earth Engine (GEE) generated CO2 equivalent (e) emissions of 0.042-0.69 g CO2e (locally) and 0.13-0.45 g CO2e (European data centre; values multiply by nine for an Australian data centre). Computation-based emissions scale rapidly for more intense processes and when testing code. When using cloud services such as GEE, users have no choice about the data centre used, and we push for EO providers to be more transparent about the location-specific impacts of EO work and to provide tools for measuring the environmental cost of cloud computation. The EO community as a whole needs to critically consider the broad suite of EO data life-cycle impacts.}, } @article {pmid37979340, year = {2023}, author = {Tomassini, S and Falcionelli, N and Bruschi, G and Sbrollini, A and Marini, N and Sernani, P and Morettini, M and Müller, H and Dragoni, AF and Burattini, L}, title = {On-cloud decision-support system for non-small cell lung cancer histology characterization from thorax computed tomography scans.}, journal = {Computerized medical imaging and graphics : the official journal of the Computerized Medical Imaging Society}, volume = {110}, number = {}, pages = {102310}, doi = {10.1016/j.compmedimag.2023.102310}, pmid = {37979340}, issn = {1879-0771}, mesh = {Humans ; *Carcinoma, Non-Small-Cell Lung/diagnostic imaging/pathology ; *Lung Neoplasms/diagnostic imaging/pathology ; *Carcinoma, Squamous Cell/pathology ; Tomography, X-Ray Computed/methods ; ROC Curve ; }, abstract = {Non-Small Cell Lung Cancer (NSCLC) accounts for about 85% of all lung cancers. Developing non-invasive techniques for NSCLC histology characterization may not only help clinicians to make targeted therapeutic treatments but also spare subjects from undergoing lung biopsy, which is challenging and can lead to clinical complications. The motivation behind the study presented here is to develop an advanced on-cloud decision-support system, named LUCY, for non-small cell LUng Cancer histologY characterization directly from thorax Computed Tomography (CT) scans. This aim was pursued by: selecting thorax CT scans of 182 LUng ADenocarcinoma (LUAD) and 186 LUng Squamous Cell carcinoma (LUSC) subjects from four openly accessible data collections (NSCLC-Radiomics, NSCLC-Radiogenomics, NSCLC-Radiomics-Genomics and TCGA-LUAD); implementing and comparing two end-to-end neural networks, the core layer of which is a convolutional long short-term memory layer; evaluating performance on the test dataset (NSCLC-Radiomics-Genomics) from a subject-level perspective in relation to NSCLC histological subtype location and grade; and dynamically interpreting the achieved results visually by producing and analyzing one heatmap video for each scan.
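The per-job cloud-emission figures quoted in the Earth observation entry above follow from straightforward energy arithmetic. As a rough illustration only, the sketch below estimates grams of CO2e from runtime, device power, data-centre overhead (PUE), and grid carbon intensity; all numeric values are assumptions for illustration, not the study's measured parameters.

    # Back-of-envelope CO2e estimate for a small cloud computation.
    # All constants below (power draw, PUE, grid intensity) are assumed
    # illustrative values, not measurements from the study above.
    def job_co2e_grams(runtime_s, device_watts, pue=1.5, grid_g_per_kwh=300):
        energy_kwh = runtime_s * device_watts / 1000 / 3600   # device energy in kWh
        return energy_kwh * pue * grid_g_per_kwh              # facility overhead x grid mix

    # A few-second band-arithmetic job vs. a minute of heavier processing:
    print(f"{job_co2e_grams(5, 60):.3f} g CO2e")    # ~0.04 g, same order as quoted
    print(f"{job_co2e_grams(60, 100):.3f} g CO2e")  # scales quickly with intensity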
LUCY reached test Area Under the receiver operating characteristic Curve (AUC) values above 77% in all NSCLC histological subtype location and grade groups, and a best AUC value of 97% on the entire dataset reserved for testing, proving high generalizability to heterogeneous data and robustness. Thus, LUCY is a clinically useful decision-support system able to provide timely, non-invasive, reliable and visually understandable predictions on LUAD and LUSC subjects in relation to clinically relevant information.}, } @article {pmid37961077, year = {2023}, author = {Kwabla, W and Dinc, F and Oumimoun, K and Kockara, S and Halic, T and Demirel, D and Arikatla, S and Ahmadi, S}, title = {Evaluation of WebRTC in the Cloud for Surgical Simulations: A case study on Virtual Rotator Cuff Arthroscopic Skill Trainer (ViRCAST).}, journal = {Learning and collaboration technologies : 10th International Conference, LCT 2023, held as part of the 25th HCI International Conference, HCII 2023, Copenhagen, Denmark, July 23-28, 2023, proceedings. Part II. LCT (Conference) (10th : 2...}, volume = {14041}, number = {}, pages = {127-143}, pmid = {37961077}, support = {R01 EB005807/EB/NIBIB NIH HHS/United States ; R01 EB025241/EB/NIBIB NIH HHS/United States ; P20 GM103429/GM/NIGMS NIH HHS/United States ; R44 AR075481/AR/NIAMS NIH HHS/United States ; R01 EB033674/EB/NIBIB NIH HHS/United States ; }, abstract = {Web Real-Time Communication (WebRTC) is an open-source technology which enables remote peer-to-peer video and audio connections. It has quickly become the new standard for real-time communications over the web and is commonly used as a video conferencing platform. In this study, we present a different application domain which may greatly benefit from WebRTC technology, namely virtual reality (VR)-based surgical simulation. The Virtual Rotator Cuff Arthroscopic Skill Trainer (ViRCAST) is the testing platform on which we completed preliminary feasibility studies for WebRTC. Since the elasticity of cloud computing provides the ability to meet possible future hardware/software requirements and demand growth, ViRCAST is deployed in a cloud environment. Additionally, in order to have plausible simulations and interactions, any VR-based surgery simulator must have haptic feedback. Therefore, we implemented an interface to WebRTC for integrating haptic devices. We tested ViRCAST on Google Cloud through haptic-integrated WebRTC at various client configurations. Our experiments showed that WebRTC with cloud and haptic integrations is a feasible solution for VR-based surgery simulators. From our experiments, the WebRTC-integrated simulation produced an average frame rate of 33 fps, and the hardware integration produced an average real-time lag of 0.7 milliseconds.}, } @article {pmid37960657, year = {2023}, author = {Farooq, MS and Abdullah, M and Riaz, S and Alvi, A and Rustam, F and Flores, MAL and Galán, JC and Samad, MA and Ashraf, I}, title = {A Survey on the Role of Industrial IoT in Manufacturing for Implementation of Smart Industry.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {21}, pages = {}, pmid = {37960657}, issn = {1424-8220}, support = {N/A//the European University of the Atlantic/ ; }, abstract = {The Internet of Things (IoT) is an innovative technology that presents effective and attractive solutions to revolutionize various domains.
Numerous solutions based on the IoT have been designed to automate industries, manufacturing units, and production houses to mitigate human involvement in hazardous operations. Owing to the large number of publications in the IoT paradigm, in particular those focusing on industrial IoT (IIoT), a comprehensive survey is important to provide insights into recent developments. This survey presents the workings of the IoT-based smart industry and its major components, and outlines the state-of-the-art network infrastructure, including the structured layers of IIoT architecture, IIoT network topologies, protocols, and devices. Furthermore, the relationship between IoT-based industries and key technologies is analyzed, including big data storage, cloud computing, and data analytics. A detailed discussion of IIoT-based application domains, smartphone application solutions, and sensor- and device-based IIoT applications developed for the management of the smart industry is also presented. In addition, IIoT-based security attacks and their relevant countermeasures are highlighted. By analyzing the essential components, their security risks, and available solutions, future research directions regarding the implementation of IIoT are outlined. Finally, a comprehensive discussion of open research challenges and issues related to the smart industry is also presented.}, } @article {pmid37960612, year = {2023}, author = {Leng, J and Chen, X and Zhao, J and Wang, C and Zhu, J and Yan, Y and Zhao, J and Shi, W and Zhu, Z and Jiang, X and Lou, Y and Feng, C and Yang, Q and Xu, F}, title = {A Light Vehicle License-Plate-Recognition System Based on Hybrid Edge-Cloud Computing.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {21}, pages = {}, pmid = {37960612}, issn = {1424-8220}, support = {No.ZR2022MF289//Shandong Provincial Natural Science Foundation/ ; ZR2019MA037//Shandong Provincial Natural Science Foundation/ ; No.62271293//National Natural Science Foundation of China/ ; No.2021GXRC071//2021 Jinan City "20 New Universities" Support Project/ ; No.2021yb08//Qilu University of Technology 2021 Campus General Teaching Reform Project/ ; No. P202204//Qilu University of Technology 2022 Talent Training and Teaching Reform Project/ ; }, abstract = {With the world moving towards low-carbon and environmentally friendly development, the rapid growth of new-energy vehicles is evident. The utilization of deep-learning-based license-plate-recognition (LPR) algorithms has become widespread. However, existing LPR systems have difficulty achieving timely, effective, and energy-saving recognition due to their inherent limitations such as high latency and energy consumption. An innovative Edge-LPR system that leverages edge computing and lightweight network models is proposed in this paper. With this technology, the excessive reliance on cloud computational capacity and the uneven allocation of cloud resources can be successfully mitigated. The system is a deliberately lightweight LPR pipeline: channel pruning was used to reconstruct the backbone layer, reduce the network model parameters, and effectively reduce GPU resource consumption. By utilizing the computing resources of the Intel second-generation computing stick, the network models were deployed on edge gateways to detect license plates directly.
The reliability and effectiveness of the Edge-LPR system were validated through experimental analysis of the CCPD standard dataset and a real-time monitoring dataset from charging stations. The experimental results on the CCPD dataset demonstrated that the network's parameters totalled only 0.606 MB, with an impressive accuracy rate of 97%.}, } @article {pmid37960584, year = {2023}, author = {Younas, MI and Iqbal, MJ and Aziz, A and Sodhro, AH}, title = {Toward QoS Monitoring in IoT Edge Devices Driven Healthcare-A Systematic Literature Review.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {21}, pages = {}, pmid = {37960584}, issn = {1424-8220}, support = {2020VBC0002//PIFI 2020 (2020VBC0002), China/ ; }, mesh = {Humans ; *Artificial Intelligence ; Cloud Computing ; *Disasters ; Industry ; Delivery of Health Care ; }, abstract = {Smart healthcare is altering the delivery of healthcare by combining the benefits of IoT, mobile, and cloud computing. Cloud computing has tremendously helped the health industry connect healthcare facilities, caregivers, and patients for information sharing. The main drivers for implementing effective healthcare systems are low latency and faster response times. Quick responses among healthcare organizations are thus important in general, but in an emergency, significant latency at different stakeholders might result in disastrous situations. Cutting-edge approaches like edge computing and artificial intelligence (AI) can deal with such problems. A packet cannot be sent from one location to another unless the "quality of service" (QoS) specifications are met. The term QoS refers to how well a service works for users. QoS parameters like throughput, bandwidth, transmission delay, availability, jitter, latency, and packet loss are crucial in this regard. Our focus is on the individual devices present at different levels of the smart healthcare infrastructure and the QoS requirements of the healthcare system as a whole. The contribution of this paper is five-fold: first, a novel pre-SLR method for comprehensive keyword research on subject-related themes for mining pertinent research papers for a quality SLR; second, an SLR on QoS improvement in smart healthcare apps; third, a review of several QoS techniques used in current smart healthcare apps; fourth, an examination of the most important QoS measures in contemporary smart healthcare apps; and fifth, solutions to the problems encountered in delivering QoS in smart healthcare IoT applications to improve healthcare services.}, } @article {pmid37960453, year = {2023}, author = {Abbas, Q and Ahmad, G and Alyas, T and Alghamdi, T and Alsaawy, Y and Alzahrani, A}, title = {Revolutionizing Urban Mobility: IoT-Enhanced Autonomous Parking Solutions with Transfer Learning for Smart Cities.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {21}, pages = {}, pmid = {37960453}, issn = {1424-8220}, abstract = {Smart cities have emerged as a specialized domain encompassing various technologies, transitioning from civil engineering to technology-driven solutions. The accelerated development of technologies, such as the Internet of Things (IoT), software-defined networks (SDN), 5G, artificial intelligence, cognitive science, and analytics, has played a crucial role in providing solutions for smart cities. Smart cities heavily rely on devices, ad hoc networks, and cloud computing to integrate and streamline various activities towards common goals.
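Several of the QoS parameters named in the systematic review above (latency, jitter, packet loss) reduce to simple computations over per-packet timestamps. The sketch below uses made-up timestamps, with the mean absolute difference of consecutive one-way delays as a simple jitter proxy rather than any standard's reference estimator.

    # Illustrative QoS summary from per-packet send/receive timestamps (ms).
    # The timestamps are invented; "jitter" here is a simple consecutive-delay
    # difference proxy, not a standard's exact estimator.
    def qos_summary(sent, received):
        delays = [received[p] - sent[p] for p in sent if p in received]
        latency = sum(delays) / len(delays)
        jitter = sum(abs(b - a) for a, b in zip(delays, delays[1:])) / max(len(delays) - 1, 1)
        loss = 1 - len(delays) / len(sent)
        return {"latency_ms": latency, "jitter_ms": jitter, "packet_loss": loss}

    sent = {1: 0.0, 2: 20.0, 3: 40.0, 4: 60.0}
    received = {1: 12.0, 2: 35.0, 3: 51.0}       # packet 4 never arrived
    print(qos_summary(sent, received))            # latency ~12.7 ms, loss 25%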
However, the complexity arising from multiple cloud service providers offering myriad services necessitates a stable and coherent platform for sustainable operations. The Smart City Operational Platform Ecology (SCOPE) model has been developed to address these growing demands, and incorporates machine learning, cognitive correlates, ecosystem management, and security. SCOPE provides an ecosystem that establishes a balance for achieving sustainability and progress. In the context of smart cities, Internet of Things (IoT) devices play a significant role in enabling automation and data capture. This research paper focuses on a specific module of SCOPE, which deals with data processing and learning mechanisms for object identification in smart cities. Specifically, it presents a car parking system that utilizes smart identification techniques to identify vacant slots. The learning controller in SCOPE employs a two-tier approach, and utilizes two different models, namely AlexNet and YOLO, to ensure procedural stability and improvement.}, } @article {pmid37954389, year = {2023}, author = {Biswas, J and Jobaer, MA and Haque, SF and Islam Shozib, MS and Limon, ZA}, title = {Mapping and monitoring land use land cover dynamics employing Google Earth Engine and machine learning algorithms on Chattogram, Bangladesh.}, journal = {Heliyon}, volume = {9}, number = {11}, pages = {e21245}, pmid = {37954389}, issn = {2405-8440}, abstract = {Land use land cover change (LULC) significantly impacts urban sustainability, urban planning, climate change, natural resource management, and biodiversity. The Chattogram Metropolitan Area (CMA) has been going through rapid urbanization, which has impacted the LULC transformation and accelerated the growth of urban sprawl and unplanned development. To map those urban sprawls and natural resources depletion, this study aims to monitor the LULC change using Landsat satellite imagery from 2003 to 2023 in the cloud-based remote sensing platform Google Earth Engine (GEE). LULC has been classified into five distinct classes: waterbody, built-up, bare land, dense vegetation, and cropland, employing four machine learning algorithms (random forest, gradient tree boost, classification & regression tree, and support vector machine) in the GEE platform. The overall accuracy (kappa statistics) and the receiver operating characteristic (ROC) curve have demonstrated satisfactory results. The results indicate that the CART model outperforms other LULC models when considering efficiency and accuracy in the designated study region. The analysis of LULC conversions revealed notable trends, patterns, and magnitudes across all periods: 2003-2013, 2013-2023, and 2003-2023. The expansion of unregulated built-up areas and the decline of croplands emerged as primary concerns.
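For readers unfamiliar with the Google Earth Engine workflow used in the land-cover study above, the sample-train-classify pattern looks roughly like the Python sketch below. The image collection dates, band list, tree count, and the labelled-points asset are placeholders, not the study's actual inputs.

    # Sketch of a typical GEE supervised classification (Earth Engine Python API).
    # Asset path, dates, bands, and tree count are placeholders.
    import ee
    ee.Initialize()  # assumes prior authentication

    bands = ["B2", "B3", "B4", "B8"]                          # blue, green, red, NIR
    image = (ee.ImageCollection("COPERNICUS/S2_SR")
             .filterDate("2023-01-01", "2023-12-31")
             .median()
             .select(bands))
    points = ee.FeatureCollection("users/example/training_points")  # hypothetical asset

    training = image.sampleRegions(collection=points,
                                   properties=["landcover"], scale=10)
    classifier = ee.Classifier.smileRandomForest(100).train(
        features=training, classProperty="landcover", inputProperties=bands)
    classified = image.classify(classifier)                   # per-pixel class map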
However, there was a positive indication of a significant increase in dense vegetation within the study area over the 20-year period.}, } @article {pmid37946898, year = {2023}, author = {Healthcare Engineering, JO}, title = {Retracted: Sports Training Teaching Device Based on Big Data and Cloud Computing.}, journal = {Journal of healthcare engineering}, volume = {2023}, number = {}, pages = {9795604}, pmid = {37946898}, issn = {2040-2309}, abstract = {[This retracts the article DOI: 10.1155/2021/7339486.].}, } @article {pmid37946860, year = {2023}, author = {Intelligence And Neuroscience, C}, title = {Retracted: Real-Time Detection of Body Nutrition in Sports Training Based on Cloud Computing and Somatosensory Network.}, journal = {Computational intelligence and neuroscience}, volume = {2023}, number = {}, pages = {9784817}, pmid = {37946860}, issn = {1687-5273}, abstract = {[This retracts the article DOI: 10.1155/2022/9911905.].}, } @article {pmid37942151, year = {2023}, author = {Faruqui, N and Yousuf, MA and Kateb, FA and Abdul Hamid, M and Monowar, MM}, title = {Healthcare As a Service (HAAS): CNN-based cloud computing model for ubiquitous access to lung cancer diagnosis.}, journal = {Heliyon}, volume = {9}, number = {11}, pages = {e21520}, pmid = {37942151}, issn = {2405-8440}, abstract = {The field of automated lung cancer diagnosis using Computed Tomography (CT) scans has been significantly advanced by the precise predictions offered by Convolutional Neural Network (CNN)-based classifiers. Critical areas of study include improving image quality, optimizing learning algorithms, and enhancing diagnostic accuracy. To facilitate a seamless transition from research laboratories to real-world applications, it is crucial to improve the technology's usability, a factor frequently overlooked in current state-of-the-art research. This paper introduces Healthcare-As-A-Service (HAAS), an innovative concept inspired by Software-As-A-Service (SAAS) within the cloud computing paradigm. As a comprehensive lung cancer diagnosis service system, HAAS has the potential to reduce lung cancer mortality rates by providing early diagnosis opportunities to everyone. We present HAASNet, a cloud-compatible CNN that boasts an accuracy rate of 96.07%. By integrating HAASNet predictions with physio-symptomatic data from the Internet of Medical Things (IoMT), the proposed HAAS model generates accurate and reliable lung cancer diagnosis reports. Leveraging IoMT and cloud technology, the proposed service is globally accessible via the Internet, transcending geographic boundaries. This lung cancer diagnosis service achieves average precision, recall, and F1-scores of 96.47%, 95.39%, and 94.81%, respectively.}, } @article {pmid37941779, year = {2023}, author = {Wang, C and Dai, W}, title = {Lung nodule segmentation via semi-residual multi-resolution neural networks.}, journal = {Open life sciences}, volume = {18}, number = {1}, pages = {20220727}, pmid = {37941779}, issn = {2391-5412}, abstract = {The integration of deep neural networks and cloud computing has become increasingly prevalent within the domain of medical image processing, facilitated by recent strides in neural network theory and the advent of the internet of things (IoT).
This has led to the emergence of numerous image segmentation networks and innovative solutions that help medical practitioners diagnose lung cancer. In this study, we present an end-to-end neural network model, the "semi-residual Multi-resolution Convolutional Neural Network" (semi-residual MCNN), designed to generate precise lung nodule segmentation maps in a cloud computing setting. Central to the architecture are three features that together yield a notable enhancement in predictive accuracy: the incorporation of semi-residual building blocks, the deployment of group normalization techniques, and the use of multi-resolution output heads. The model is systematically trained and tested using the LIDC-IDRI dataset - a widely used and accessible repository - comprising a diverse ensemble of 1,018 distinct lung CT images tailored to lung nodule segmentation.}, } @article {pmid37937074, year = {2023}, author = {Wadford, DA and Baumrind, N and Baylis, EF and Bell, JM and Bouchard, EL and Crumpler, M and Foote, EM and Gilliam, S and Glaser, CA and Hacker, JK and Ledin, K and Messenger, SL and Morales, C and Smith, EA and Sevinsky, JR and Corbett-Detig, RB and DeRisi, J and Jacobson, K}, title = {Implementation of California COVIDNet - a multi-sector collaboration for statewide SARS-CoV-2 genomic surveillance.}, journal = {Frontiers in public health}, volume = {11}, number = {}, pages = {1249614}, pmid = {37937074}, issn = {2296-2565}, support = {U01 CK000539/CK/NCEZID CDC HHS/United States ; U01CK000539/ACL/ACL HHS/United States ; }, mesh = {Humans ; *SARS-CoV-2/genetics ; *COVID-19/epidemiology ; Genomics ; California/epidemiology ; Data Management ; }, abstract = {INTRODUCTION: The SARS-CoV-2 pandemic represented a formidable scientific and technological challenge to public health due to its rapid spread and evolution. To meet these challenges and to characterize the virus over time, the State of California established the California SARS-CoV-2 Whole Genome Sequencing (WGS) Initiative, or "California COVIDNet". This initiative constituted an unprecedented multi-sector collaborative effort to achieve large-scale genomic surveillance of SARS-CoV-2 across California to monitor the spread of variants within the state, to detect new and emerging variants, and to characterize outbreaks in congregate, workplace, and other settings.

METHODS: California COVIDNet consists of 50 laboratory partners that include public health laboratories, private clinical diagnostic laboratories, and academic sequencing facilities as well as expert advisors, scientists, consultants, and contractors. Data management, sample sourcing and processing, and computational infrastructure were major challenges that had to be resolved in the midst of the pandemic chaos in order to conduct SARS-CoV-2 genomic surveillance. Data management, storage, and analytics needs were addressed with both conventional database applications and newer cloud-based data solutions, which also fulfilled computational requirements.

RESULTS: Representative and randomly selected samples were sourced from state-sponsored community testing sites. Since March of 2021, California COVIDNet partners have contributed more than 450,000 SARS-CoV-2 genomes sequenced from remnant samples from both molecular and antigen tests. Combined with genomes from CDC-contracted WGS labs, there are currently nearly 800,000 genomes from all 61 local health jurisdictions (LHJs) in California in the COVIDNet sequence database. More than 5% of all reported positive tests in the state have been sequenced, with similar rates of sequencing across 5 major geographic regions in the state.

DISCUSSION: Implementation of California COVIDNet revealed challenges and limitations in the public health system. These were overcome by engaging in novel partnerships that established a successful genomic surveillance program which provided valuable data to inform the COVID-19 public health response in California. Significantly, California COVIDNet has provided a foundational data framework and computational infrastructure needed to respond to future public health crises.}, } @article {pmid37933859, year = {2024}, author = {Varadi, M and Bertoni, D and Magana, P and Paramval, U and Pidruchna, I and Radhakrishnan, M and Tsenkov, M and Nair, S and Mirdita, M and Yeo, J and Kovalevskiy, O and Tunyasuvunakool, K and Laydon, A and Žídek, A and Tomlinson, H and Hariharan, D and Abrahamson, J and Green, T and Jumper, J and Birney, E and Steinegger, M and Hassabis, D and Velankar, S}, title = {AlphaFold Protein Structure Database in 2024: providing structure coverage for over 214 million protein sequences.}, journal = {Nucleic acids research}, volume = {52}, number = {D1}, pages = {D368-D375}, pmid = {37933859}, issn = {1362-4962}, support = {//Google DeepMind/ ; 2019R1A6A1A10073437//National Research Foundation of Korea/ ; //Samsung DS Research Fund/ ; //Seoul National University/ ; RS-2023-00250470//National Research Foundation of Korea/ ; }, mesh = {Amino Acid Sequence ; *Artificial Intelligence ; Databases, Protein ; *Proteome ; Search Engine ; Proteins/chemistry ; *Protein Structure, Secondary ; }, abstract = {The AlphaFold Protein Structure Database (AlphaFold DB, https://alphafold.ebi.ac.uk) has significantly impacted structural biology by amassing over 214 million predicted protein structures, expanding from the initial 300k structures released in 2021. Enabled by the groundbreaking AlphaFold2 artificial intelligence (AI) system, the predictions archived in AlphaFold DB have been integrated into primary data resources such as PDB, UniProt, Ensembl, InterPro and MobiDB. Our manuscript details subsequent enhancements in data archiving, covering successive releases encompassing model organisms, global health proteomes, Swiss-Prot integration, and a host of curated protein datasets. We detail the data access mechanisms of AlphaFold DB, from direct file access via FTP to advanced queries using Google Cloud Public Datasets and the programmatic access endpoints of the database. We also discuss the improvements and services added since its initial release, including enhancements to the Predicted Aligned Error viewer, customisation options for the 3D viewer, and improvements in the search engine of AlphaFold DB.}, } @article {pmid37932347, year = {2023}, author = {Bao, J and Wu, C and Lin, Y and Zhong, L and Chen, X and Yin, R}, title = {A scalable approach to optimize traffic signal control with federated reinforcement learning.}, journal = {Scientific reports}, volume = {13}, number = {1}, pages = {19184}, pmid = {37932347}, issn = {2045-2322}, abstract = {Intelligent Transportation has seen significant advancements with Deep Learning and the Internet of Things, making Traffic Signal Control (TSC) research crucial for reducing congestion, travel time, emissions, and energy consumption. Reinforcement Learning (RL) has emerged as the primary method for TSC, but centralized learning poses communication and computing challenges, while distributed learning struggles to adapt across intersections. This paper presents a novel approach using Federated Learning (FL)-based RL for TSC.
FL integrates knowledge from local agents into a global model, overcoming intersection variations with a unified agent state structure. To endow the model with the capacity to represent the TSC task globally while preserving the distinctive feature information inherent to each intersection, a segment of the RL neural network is aggregated to the cloud and the remaining layers undergo fine-tuning upon convergence of the model training process. Extensive experiments demonstrate reduced queuing and waiting times globally, and the successful scalability of the proposed model is validated on a real-world traffic network in Monaco, showing its potential for new intersections.}, } @article {pmid37932308, year = {2023}, author = {Mangalampalli, S and Karri, GR and Mohanty, SN and Ali, S and Khan, MI and Abduvalieva, D and Awwad, FA and Ismail, EAA}, title = {Fault tolerant trust based task scheduler using Harris Hawks optimization and deep reinforcement learning in multi cloud environment.}, journal = {Scientific reports}, volume = {13}, number = {1}, pages = {19179}, pmid = {37932308}, issn = {2045-2322}, abstract = {The cloud computing model provides on-demand delivery of seamless services to customers around the world, yet single points of failure occur in the cloud model due to the improper assignment of tasks to virtual machines; this increases the rate of failures, which affects SLA-based trust parameters (availability, success rate, turnaround efficiency) and in turn undermines trust in the cloud provider. In this paper, we propose a task scheduling algorithm that captures the priorities of all tasks and virtual resources from the task manager; tasks arriving at the cloud application console are fed to the task scheduler, which makes scheduling decisions based on a hybridization of Harris hawks optimization and ML-based reinforcement learning to enhance the scheduling process. Task scheduling in this research is performed in two phases, i.e., a task selection phase and a task mapping phase. In the task selection phase, the incoming priorities of tasks and VMs are captured and schedules are generated using Harris hawks optimization. In the task mapping phase, the generated schedules are optimized using a DQN model based on deep reinforcement learning. In this research, we use a multi-cloud environment to secure the availability of VMs when incoming tasks increase dynamically, and we migrate tasks from one cloud to another to mitigate migration time. Extensive simulations are conducted in CloudSim, and workloads generated from fabricated datasets as well as real synthetic workloads from NASA and HPC2N are used to check the efficacy of our proposed scheduler (FTTHDRL). It is compared against existing task schedulers, i.e., the MOABCQ, RATS-HM, and AINN-BPSO approaches, and our proposed FTTHDRL outperforms existing mechanisms by minimizing the rate of failures and resource cost, and by improving SLA-based trust parameters.}, } @article {pmid37928198, year = {2023}, author = {Mee, L and Barribeau, SM}, title = {Influence of social lifestyles on host-microbe symbioses in the bees.}, journal = {Ecology and evolution}, volume = {13}, number = {11}, pages = {e10679}, pmid = {37928198}, issn = {2045-7758}, abstract = {Microbiomes are increasingly recognised as critical for the health of an organism. In eusocial insect societies, frequent social interactions allow for high-fidelity transmission of microbes across generations, leading to closer host-microbe coevolution.
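The partial-aggregation idea in the federated traffic-signal entry above, where a shared trunk of the network is averaged across intersections while each agent keeps its own output head, can be illustrated with a toy FedAvg step. The layer names and shapes below are invented for illustration, not the paper's actual architecture.

    # Toy FedAvg step over only the shared "trunk" parameters; each agent's
    # head stays local. Names and shapes are illustrative, not the paper's.
    import numpy as np

    def fedavg_trunk(agents, trunk_keys):
        global_trunk = {k: np.mean([a[k] for a in agents], axis=0) for k in trunk_keys}
        for a in agents:              # broadcast averaged trunk; heads untouched
            a.update(global_trunk)
        return global_trunk

    rng = np.random.default_rng(0)
    make_agent = lambda: {"trunk_w": rng.normal(size=(8, 8)),
                          "head_w": rng.normal(size=(8, 4))}   # intersection-specific
    agents = [make_agent() for _ in range(3)]
    fedavg_trunk(agents, trunk_keys=["trunk_w"])
    assert np.allclose(agents[0]["trunk_w"], agents[1]["trunk_w"])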
The microbial communities of bees with other social lifestyles are less studied, and few comparisons have been made between taxa that vary in social structure. To address this gap, we leveraged a cloud-computing resource and publicly available transcriptomic data to conduct a survey of microbial diversity in bee samples from a variety of social lifestyles and taxa. We consistently recover the core microbes of well-studied corbiculate bees, supporting this method's ability to accurately characterise microbial communities. We find that the bacterial communities of bees are influenced by host location, phylogeny and social lifestyle, although no clear effect was found for fungal or viral microbial communities. Bee genera with more complex societies tend to harbour more diverse microbes, with Wolbachia detected more commonly in solitary tribes. We present a description of the microbiota of Euglossine bees and find that they do not share the "corbiculate core" microbiome. Notably, we find that bacteria with known anti-pathogenic properties are present across social bee genera, suggesting that symbioses that enhance host immunity are important with higher sociality. Our approach provides an inexpensive means of exploring microbiomes of a given taxa and identifying avenues for further research. These findings contribute to our understanding of the relationships between bees and their associated microbial communities, highlighting the importance of considering microbiome dynamics in investigations of bee health.}, } @article {pmid37917778, year = {2023}, author = {Qian, J and She, Q}, title = {The impact of corporate digital transformation on the export product quality: Evidence from Chinese enterprises.}, journal = {PloS one}, volume = {18}, number = {11}, pages = {e0293461}, pmid = {37917778}, issn = {1932-6203}, abstract = {The digital economy has become a driving force in the rapid development of the global economy and the promotion of export trade. Pivotal in its advent, the digital transformation of enterprises utilizes cloud computing, big data, artificial intelligence, and other digital technologies to provide an impetus for evolution and transformation in various industries and fields. This has been critical for enhancing both quality and efficiency in enterprises based in the People's Republic of China. Through the available data on its listed enterprises, this paper measures their digital transformation through a textual analysis and examines how this transformation influences their export product quality. We then explore the possible mechanisms at work in this influence from the perspective of enterprise heterogeneity. The results show that: (1) digital transformation significantly enhances the export product quality of an enterprise, and the empirical findings still hold after a series of robustness tests; (2) further mechanism analysis reveals that digital transformation can positively affect export product quality through the two mechanisms of process productivity (φ), the ability to produce output using fewer variable inputs, and product productivity (ξ), the ability to produce quality with fewer fixed outlays; (3) in terms of enterprise heterogeneity, the impact of digital transformation on export product quality is significant for enterprises engaged in general trade or high-tech industries and those with strong corporate governance.
In terms of heterogeneity in enterprise digital transformation and regional digital infrastructure, the higher the level of digital transformation and regional digital infrastructure, the greater the impact of digital transformation on export product quality. This paper has practical implications for public policies that offer vital aid to enterprises as they seek digital transformation to remain in sync with the digital economy, upgrade their product quality, and drive the sustainable, high-quality, and healthy development of their nation's economy.}, } @article {pmid37905003, year = {2023}, author = {Copeland, CJ and Roddy, JW and Schmidt, AK and Secor, PR and Wheeler, TJ}, title = {VIBES: A Workflow for Annotating and Visualizing Viral Sequences Integrated into Bacterial Genomes.}, journal = {bioRxiv : the preprint server for biology}, volume = {}, number = {}, pages = {}, pmid = {37905003}, issn = {2692-8205}, support = {R01 AI138981/AI/NIAID NIH HHS/United States ; R01 GM132600/GM/NIGMS NIH HHS/United States ; }, abstract = {Bacteriophages are viruses that infect bacteria. Many bacteriophages integrate their genomes into the bacterial chromosome and become prophages. Prophages may substantially burden or benefit host bacteria fitness, acting in some cases as parasites and in others as mutualists, and have been demonstrated to increase host virulence. The increasing ease of bacterial genome sequencing provides an opportunity to deeply explore prophage prevalence and insertion sites. Here we present VIBES, a workflow intended to automate prophage annotation in complete bacterial genome sequences. VIBES provides additional context to prophage annotations by annotating bacterial genes and viral proteins in user-provided bacterial and viral genomes. The VIBES pipeline is implemented as a Nextflow-driven workflow, providing a simple, unified interface for execution on local, cluster, and cloud computing environments. For each step of the pipeline, a container including all necessary software dependencies is provided. VIBES produces results in simple tab-separated format and generates intuitive and interactive visualizations for data exploration. Despite VIBES' primary emphasis on prophage annotation, its generic alignment-based design allows it to be deployed as a general-purpose sequence similarity search manager. We demonstrate the utility of the VIBES prophage annotation workflow by searching for 178 Pf phage genomes across 1,072 Pseudomonas spp. genomes. VIBES software is available at https://github.com/TravisWheelerLab/VIBES.}, } @article {pmid37899771, year = {2023}, author = {Cai, T and Herner, K and Yang, T and Wang, M and Acosta Flechas, M and Harris, P and Holzman, B and Pedro, K and Tran, N}, title = {Accelerating Machine Learning Inference with GPUs in ProtoDUNE Data Processing.}, journal = {Computing and software for big science}, volume = {7}, number = {1}, pages = {11}, pmid = {37899771}, issn = {2510-2044}, abstract = {We study the performance of a cloud-based GPU-accelerated inference server to speed up event reconstruction in neutrino data batch jobs. Using detector data from the ProtoDUNE experiment and employing the standard DUNE grid job submission tools, we attempt to reprocess the data by running several thousand concurrent grid jobs, a rate we expect to be typical of current and future neutrino physics experiments. We process most of the dataset with the GPU version of our processing algorithm and the remainder with the CPU version for timing comparisons.
We find that a 100-GPU cloud-based server easily meets the processing demand, and that the GPU version of the event processing algorithm is two times faster than the CPU version when compared with the newest CPUs in our sample. The amount of data transferred to the inference server during the GPU runs can overwhelm even the highest-bandwidth network switches, however, unless care is taken to observe network facility limits or otherwise distribute the jobs to multiple sites. We discuss the lessons learned from this processing campaign and several avenues for future improvements.}, } @article {pmid37898096, year = {2023}, author = {Horsley, JJ and Thomas, RH and Chowdhury, FA and Diehl, B and McEvoy, AW and Miserocchi, A and de Tisi, J and Vos, SB and Walker, MC and Winston, GP and Duncan, JS and Wang, Y and Taylor, PN}, title = {Complementary structural and functional abnormalities to localise epileptogenic tissue.}, journal = {EBioMedicine}, volume = {97}, number = {}, pages = {104848}, pmid = {37898096}, issn = {2352-3964}, support = {MR/T04294X/1/MRC_/Medical Research Council/United Kingdom ; G0802012/MRC_/Medical Research Council/United Kingdom ; U01 NS090407/NS/NINDS NIH HHS/United States ; /WT_/Wellcome Trust/United Kingdom ; MR/M00841X/1/MRC_/Medical Research Council/United Kingdom ; }, mesh = {Humans ; Retrospective Studies ; *Epilepsy/diagnostic imaging/surgery ; Electroencephalography/methods ; Electrocorticography ; *Drug Resistant Epilepsy/surgery ; Seizures ; }, abstract = {BACKGROUND: When investigating suitability for epilepsy surgery, people with drug-refractory focal epilepsy may have intracranial EEG (iEEG) electrodes implanted to localise seizure onset. Diffusion-weighted magnetic resonance imaging (dMRI) may be acquired to identify key white matter tracts for surgical avoidance. Here, we investigate whether structural connectivity abnormalities, inferred from dMRI, may be used in conjunction with functional iEEG abnormalities to aid localisation of the epileptogenic zone (EZ), improving surgical outcomes in epilepsy.

METHODS: We retrospectively investigated data from 43 patients (42% female) with epilepsy who had surgery following iEEG. Twenty-five patients (58%) were free from disabling seizures (ILAE 1 or 2) at one year. Interictal iEEG functional, and dMRI structural connectivity abnormalities were quantified by comparison to a normative map and healthy controls. We explored whether the resection of maximal abnormalities related to improved surgical outcomes, in both modalities individually and concurrently. Additionally, we suggest how connectivity abnormalities may inform the placement of iEEG electrodes pre-surgically using a patient case study.

FINDINGS: Seizure freedom was 15 times more likely in patients with resection of maximal connectivity and iEEG abnormalities (p = 0.008). Both modalities separately distinguished patient surgical outcome groups and when used simultaneously, a decision tree correctly separated 36 of 43 (84%) patients.

INTERPRETATION: Our results suggest that both connectivity and iEEG abnormalities may localise epileptogenic tissue, and that these two modalities may provide complementary information in pre-surgical evaluations.

FUNDING: This research was funded by UKRI, CDT in Cloud Computing for Big Data, NIH, MRC, Wellcome Trust and Epilepsy Research UK.}, } @article {pmid37896735, year = {2023}, author = {Ramzan, M and Shoaib, M and Altaf, A and Arshad, S and Iqbal, F and Castilla, ÁK and Ashraf, I}, title = {Distributed Denial of Service Attack Detection in Network Traffic Using Deep Learning Algorithm.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {20}, pages = {}, pmid = {37896735}, issn = {1424-8220}, support = {N/A//the European University of Atlantic/ ; }, abstract = {Internet security is a major concern these days due to the increasing demand for information technology (IT)-based platforms and cloud computing. With its expansion, the Internet has been facing various types of attacks. Viruses, denial of service (DoS) attacks, distributed DoS (DDoS) attacks, code injection attacks, and spoofing are the most common types of attacks in the modern era. Due to the expansion of IT, the volume and severity of network attacks have been increasing lately. DoS and DDoS are the most frequently reported network traffic attacks. Traditional solutions such as intrusion detection systems and firewalls cannot detect complex DDoS and DoS attacks. With the integration of artificial intelligence-based machine learning and deep learning methods, several novel approaches have been presented for DoS and DDoS detection. In particular, deep learning models have played a crucial role in detecting DDoS attacks due to their exceptional performance. This study adopts deep learning models including the recurrent neural network (RNN), long short-term memory (LSTM), and gated recurrent unit (GRU) to detect DDoS attacks on the most recent dataset, CICDDoS2019, and a comparative analysis is conducted with the CICIDS2017 dataset. The comparative analysis contributes to the development of a competent and accurate method for detecting DDoS attacks with reduced execution time and complexity. The experimental results demonstrate that the models perform equally well on the CICDDoS2019 dataset with an accuracy score of 0.99, but there is a difference in execution time, with the GRU showing a shorter execution time than the RNN and LSTM.}, } @article {pmid37896596, year = {2023}, author = {Sheu, RK and Lin, YC and Pardeshi, MS and Huang, CY and Pai, KC and Chen, LC and Huang, CC}, title = {Adaptive Autonomous Protocol for Secured Remote Healthcare Using Fully Homomorphic Encryption (AutoPro-RHC).}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {20}, pages = {}, pmid = {37896596}, issn = {1424-8220}, support = {Grant MOST 111-2321-B-075A-001//Ministry of Science and Technology/ ; }, mesh = {Humans ; *Blood Glucose Self-Monitoring ; *Computer Security ; Blood Glucose ; Confidentiality ; Privacy ; Delivery of Health Care ; }, abstract = {Delivering healthcare services to remote areas with affected populations is a challenge. Fortunately, remote health monitoring (RHM) has improved hospital service quality and demonstrated sustainable growth. However, the absence of security may breach the Health Insurance Portability and Accountability Act (HIPAA), which has an exclusive set of rules for the privacy of medical data. Therefore, the goal of this work is to design and implement the adaptive Autonomous Protocol (AutoPro) on the patient's remote healthcare (RHC) monitoring data for the hospital using fully homomorphic encryption (FHE).
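As a minimal sketch of the kind of GRU-based detector compared in the DDoS study above, the following defines a tiny binary classifier in Keras. The sequence length, feature count, and hyperparameters are assumptions, and the CICDDoS2019 feature preprocessing is omitted entirely.

    # Minimal GRU binary classifier sketch (attack vs. benign traffic).
    # Input shape and hyperparameters are assumed; dataset preprocessing omitted.
    import tensorflow as tf

    T, F = 10, 32   # assumed: each flow as a length-T sequence of F features
    model = tf.keras.Sequential([
        tf.keras.layers.GRU(64, input_shape=(T, F)),
        tf.keras.layers.Dropout(0.2),
        tf.keras.layers.Dense(1, activation="sigmoid"),
    ])
    model.compile(optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"])
    # model.fit(X_train, y_train, epochs=5, batch_size=256)  # after preprocessing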
The aim is to perform adaptive autonomous FHE computations on recent RHM data for providing health status reporting and maintaining the confidentiality of every patient. The autonomous protocol works independently within the group of prime hospital servers without the dependency on the third-party system. The adaptiveness of the protocol modes is based on the patient's affected level of slight, medium, and severe cases. Related applications are given as glucose monitoring for diabetes, digital blood pressure for stroke, pulse oximeter for COVID-19, electrocardiogram (ECG) for cardiac arrest, etc. The design for this work consists of an autonomous protocol, hospital servers combining multiple prime/local hospitals, and an algorithm based on fast fully homomorphic encryption over the torus (TFHE) library with a ring-variant by the Gentry, Sahai, and Waters (GSW) scheme. The concrete-ML model used within this work is trained using an open heart disease dataset from the UCI machine learning repository. Preprocessing is performed to recover the lost and incomplete data in the dataset. The concrete-ML model is evaluated both on the workstation and cloud server. Also, the FHE protocol is implemented on the AWS cloud network with performance details. The advantages entail providing confidentiality to the patient's data/report while saving the travel and waiting time for the hospital services. The patient's data will be completely confidential and can receive emergency services immediately. The FHE results show that the highest accuracy is achieved by support vector classification (SVC) of 88% and linear regression (LR) of 86% with the area under curve (AUC) of 91% and 90%, respectively. Ultimately, the FHE-based protocol presents a novel system that is successfully demonstrated on the cloud network.}, } @article {pmid37896541, year = {2023}, author = {Ramachandran, D and Naqi, SM and Perumal, G and Abbas, Q}, title = {DLTN-LOSP: A Novel Deep-Linear-Transition-Network-Based Resource Allocation Model with the Logic Overhead Security Protocol for Cloud Systems.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {20}, pages = {}, pmid = {37896541}, issn = {1424-8220}, support = {IMSIU-RP23067//Deanship of Scientific Research at Imam Mohammad Ibn Saud Islamic University (IMSIU)/ ; }, abstract = {Cloud organizations now face a challenge in managing the enormous volume of data and various resources in the cloud due to the rapid growth of the virtualized environment with many service users, ranging from small business owners to large corporations. The performance of cloud computing may suffer from ineffective resource management. As a result, resources must be distributed fairly among various stakeholders without sacrificing the organization's profitability or the satisfaction of its customers. A customer's request cannot be put on hold indefinitely just because the necessary resources are not available on the board. Therefore, a novel cloud resource allocation model incorporating security management is developed in this paper. Here, the Deep Linear Transition Network (DLTN) mechanism is developed for effectively allocating resources to cloud systems. Then, an Adaptive Mongoose Optimization Algorithm (AMOA) is deployed to compute the beamforming solution for reward prediction, which supports the process of resource allocation. 
Moreover, the Logic Overhead Security Protocol (LOSP) is implemented to ensure secured resource management in the cloud system, where Burrows-Abadi-Needham (BAN) logic is used to verify the agreement logic. During the results analysis, the performance of the proposed DLTN-LOSP model is validated and compared using different metrics such as makespan, processing time, and utilization rate. For system validation and testing, 100 to 500 resources are used in this study, and the results achieved a makespan improvement of 2.3% and a utilization rate of 13%. Moreover, the obtained results confirm the superiority of the proposed framework, with better performance outcomes.}, } @article {pmid37896525, year = {2023}, author = {Pierleoni, P and Concetti, R and Belli, A and Palma, L and Marzorati, S and Esposito, M}, title = {A Cloud-IoT Architecture for Latency-Aware Localization in Earthquake Early Warning.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {20}, pages = {}, pmid = {37896525}, issn = {1424-8220}, abstract = {An effective earthquake early warning system requires rapid and reliable earthquake source detection. Despite the numerous epicenter localization solutions proposed in recent years, their utilization within the Internet of Things (IoT) framework and integration with IoT-oriented cloud platforms remain underexplored. This paper proposes a complete IoT architecture for earthquake detection, localization, and event notification. The architecture, which has been designed, deployed, and tested on a standard cloud platform, introduces an innovative approach by implementing P-wave "picking" directly on IoT devices, deviating from traditional regional earthquake early warning (EEW) approaches. Pick association, source localization, event declaration, and user notification functionalities are also deployed on the cloud. The cloud integration simplifies the integration of other services in the architecture, such as data storage and device management. Moreover, a localization algorithm based on the hyperbola method is proposed, applying the time-difference-of-arrival multilateration that is often used in wireless sensor network applications. The results show that the proposed end-to-end architecture is able to provide a quick estimate of the earthquake epicenter location with acceptable errors for an EEW system scenario. Rigorous testing against the standard of reference in Italy for regional EEW showed an overall 3.39 s gain in system localization speed, thus offering a tangible metric of the efficiency and potential of the proposed system as an EEW solution.}, } @article {pmid37895480, year = {2023}, author = {Lorenzo-Villegas, DL and Gohil, NV and Lamo, P and Gurajala, S and Bagiu, IC and Vulcanescu, DD and Horhat, FG and Sorop, VB and Diaconu, M and Sorop, MI and Oprisoni, A and Horhat, RM and Susan, M and MohanaSundaram, A}, title = {Innovative Biosensing Approaches for Swift Identification of Candida Species, Intrusive Pathogenic Organisms.}, journal = {Life (Basel, Switzerland)}, volume = {13}, number = {10}, pages = {}, pmid = {37895480}, issn = {2075-1729}, abstract = {Candida is the largest genus of medically significant fungi. Although most of its members are commensals, residing harmlessly in human bodies, some are opportunistic and dangerously invasive. These have the ability to cause severe nosocomial candidiasis and candidemia that affect the viscera and bloodstream. A prompt diagnosis will lead to a successful treatment modality.
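The time-difference-of-arrival multilateration used in the earthquake early warning entry above reduces to a small least-squares problem once P-wave picks are available. In the sketch below, the station layout, P-wave speed, and noiseless picks are synthetic illustrations, not the system's actual configuration.

    # TDOA epicenter estimate: fit a source location whose predicted arrival-time
    # differences match the observed picks. All inputs are synthetic.
    import numpy as np
    from scipy.optimize import least_squares

    stations = np.array([[0.0, 0.0], [30.0, 0.0], [0.0, 40.0], [25.0, 35.0]])  # km
    v_p = 6.0                                       # assumed P-wave speed, km/s
    true_src = np.array([12.0, 18.0])
    t_arr = np.linalg.norm(stations - true_src, axis=1) / v_p   # noiseless picks

    def residuals(xy):
        t_pred = np.linalg.norm(stations - xy, axis=1) / v_p
        # differencing against station 0 cancels the unknown origin time
        return (t_pred - t_pred[0]) - (t_arr - t_arr[0])

    sol = least_squares(residuals, x0=np.array([10.0, 10.0]))
    print(sol.x)                                    # recovers ~[12, 18]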
Biosensing technologies for the rapid and precise detection of Candida species have made remarkable progress. The development of point-of-care (POC) biosensor devices involves sensor precision down to the pico-/femtogram level, cost-effectiveness, portability, rapidity, and user-friendliness. However, futuristic diagnostics will depend on exploiting technologies such as multiplexing for high-throughput screening, CRISPR, artificial intelligence (AI), neural networks, the Internet of Things (IoT), and cloud computing of medical databases. This review gives an insight into different biosensor technologies designed for the detection of medically significant Candida species, especially Candida albicans and C. auris, and their applications in the medical setting.}, } @article {pmid37893978, year = {2023}, author = {Dineva, K and Atanasova, T}, title = {Health Status Classification for Cows Using Machine Learning and Data Management on AWS Cloud.}, journal = {Animals : an open access journal from MDPI}, volume = {13}, number = {20}, pages = {}, pmid = {37893978}, issn = {2076-2615}, support = {Д01-62/18.03.2021///Ministry of Education and Science of the Republic Bulgaria/ ; }, abstract = {The health and welfare of livestock are significant for ensuring the sustainability and profitability of the agricultural industry. Finding efficient ways to monitor and report the health status of individual cows is critical to preventing outbreaks and maintaining herd productivity. The purpose of the study is to develop a machine learning (ML) model to classify the health status of milk cows into three categories. In this research, data are collected from existing non-invasive IoT devices and tools in a dairy farm, monitoring the micro- and macroenvironment of the cow in combination with particular information on age, days in milk, lactation, and more. A workflow of various data-processing methods is systematized and presented to create a complete, efficient, and reusable roadmap for data processing, modeling, and real-world integration. Following the proposed workflow, the data were treated, and five different ML algorithms were trained and tested to select the most descriptive one to monitor the health status of individual cows. The highest result for health status assessment is obtained by a random forest classifier (RFC) with an accuracy of 0.959, recall of 0.954, and precision of 0.97. To increase the security, speed, and reliability of the work process, a cloud architecture of services is presented to integrate the trained model as an additional functionality in the Amazon Web Services (AWS) environment.
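A minimal sketch of the three-class health-status classifier described in the dairy-farm entry above, using scikit-learn's random forest. The features and labels below are random placeholders standing in for the farm's IoT measurements, so the reported scores are meaningless except as a demonstration of the workflow.

    # Random forest health-status classifier sketch (three classes).
    # X and y are random placeholders for the farm's sensor features/labels.
    import numpy as np
    from sklearn.ensemble import RandomForestClassifier
    from sklearn.model_selection import train_test_split
    from sklearn.metrics import accuracy_score, precision_score, recall_score

    rng = np.random.default_rng(0)
    X = rng.normal(size=(600, 8))       # e.g. temperature, humidity, age, days in milk
    y = rng.integers(0, 3, size=600)    # 0=healthy, 1=at risk, 2=ill (illustrative)

    X_tr, X_te, y_tr, y_te = train_test_split(X, y, test_size=0.2, random_state=0)
    clf = RandomForestClassifier(n_estimators=200, random_state=0).fit(X_tr, y_tr)
    pred = clf.predict(X_te)
    print(accuracy_score(y_te, pred),
          precision_score(y_te, pred, average="macro"),
          recall_score(y_te, pred, average="macro"))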
The classification results of the ML model are visualized in a newly created interface in the client application.}, } @article {pmid37886380, year = {2023}, author = {Intelligence And Neuroscience, C}, title = {Retracted: Cloud Computing Load Balancing Mechanism Taking into Account Load Balancing Ant Colony Optimization Algorithm.}, journal = {Computational intelligence and neuroscience}, volume = {2023}, number = {}, pages = {9831926}, pmid = {37886380}, issn = {1687-5273}, abstract = {[This retracts the article DOI: 10.1155/2022/3120883.].}, } @article {pmid37885760, year = {2023}, author = {Hachisuca, AMM and de Souza, EG and Oliveira, WKM and Bazzi, CL and Donato, DG and Mendes, IS and Abdala, MC and Mercante, E}, title = {AgDataBox-IoT - application development for agrometeorological stations in smart.}, journal = {MethodsX}, volume = {11}, number = {}, pages = {102419}, pmid = {37885760}, issn = {2215-0161}, abstract = {Currently, Brazil is one of the world's largest grain producers and exporters. Agriculture entered its 4.0 version, also known as digital agriculture, in 2017, after industry entered the 4.0 era in 2011. This new paradigm uses Internet of Things (IoT) techniques, sensors installed in the field, a network of interconnected sensors in the plot, drones for crop monitoring, multispectral cameras, storage and processing of data in Cloud Computing, and Big Data techniques to process the large volumes of generated data. One of the practical options for implementing precision agriculture is the segmentation of the plot into management zones, aiming at maximizing profits according to the productive potential of each zone, an approach that is economically viable even for small producers. Considering that climate factors directly influence yield, this study describes the development of a sensor network for climate monitoring of management zones (microclimates), allowing the identification of climate factors that influence yield at each of its stages.•Application of the internet of things to assist in decision making in the agricultural production system.•AgDataBox (ADB-IoT) web platform has an Application Programming Interface (API).•An agrometeorological station capable of monitoring all meteorological parameters was developed (Kate 3.0).}, } @article {pmid37879464, year = {2024}, author = {Dube, T and Dube, T and Dalu, T and Gxokwe, S and Marambanyika, T}, title = {Assessment of land use and land cover, water nutrient and metal concentration related to illegal mining activities in an Austral semi-arid river system: A remote sensing and multivariate analysis approach.}, journal = {The Science of the total environment}, volume = {907}, number = {}, pages = {167919}, doi = {10.1016/j.scitotenv.2023.167919}, pmid = {37879464}, issn = {1879-1026}, abstract = {The mining sector in various countries, particularly in the sub-Saharan African region, faces significant impacts from the emergence of small-scale unlicensed artisanal mines. This trend is influenced by the rising demand and prices for minerals, along with prevalent poverty levels. However, the detrimental impacts of these artisanal mines on the natural environment (i.e., rivers) have remained poorly understood, particularly in the Zimbabwean context. To understand the consequences of this situation, a study was conducted in the Umzingwane Catchment, located in southern Zimbabwe, focusing on the variations in water nutrient and metal concentrations in rivers affected by illegal mining activities along their riparian zones.
Using multi-year Sentinel-2 composite data and the random forest machine learning algorithm on the Google Earth Engine cloud-computing platform, we mapped the spatial distribution of illegal mines in the affected regions and identified seven distinct land use classes, including artisanal mines, bare surfaces, settlements, official mines, croplands, and natural vegetation, with acceptable overall and class accuracies of approximately 70%. Artisanal mines were found to be located along rivers, which was attributed to the large water requirements of the mining process. The water quality analysis revealed elevated nutrient concentrations, such as ammonium and nitrate (range 0.10-20.0 mg L[-1]), which could be attributed to mine drainage from the use of ammonium nitrate explosives during mining activities. Additionally, the prevalence of croplands in the area may have potentially contributed to increased nutrient concentrations. The principal component analysis and hierarchical cluster analysis revealed three clusters, with one of these clusters showing parameters like Ca, Mg, K, Hg and Na, which are usually associated with mineral gypsum found in the drainage of artisanal mines in the selected rivers. Cluster 2 consisted of B, Cu, Fe, Pb, and Mn, which are likely from the natural environment, and finally, cluster 3 contained As, Cd, Cr, and Zn, which were likely associated with both legal and illegal mining operations. These findings provide essential insights into the health of the studied river system and the impacts of human activities in the region. They further serve as a foundation for developing and implementing regulatory measures aimed at protecting riverine systems, in line with sustainable development goal 15.1, which focuses on preserving and conserving terrestrial and inland freshwater ecosystems, including rivers. By acting on this information, authorities can work towards safeguarding these vital natural resources and promoting sustainable development in the area.}, } @article {pmid40206311, year = {2023}, author = {Salman, S and Gu, Q and Dherin, B and Reddy, S and Vanderboom, P and Sharma, R and Lancaster, L and Tawk, R and Freeman, WD}, title = {Hemorrhage Evaluation and Detector System for Underserved Populations: HEADS-UP.}, journal = {Mayo Clinic proceedings. Digital health}, volume = {1}, number = {4}, pages = {547-556}, pmid = {40206311}, issn = {2949-7612}, abstract = {OBJECTIVE: To create a rapid, cloud-based, and deployable machine learning (ML) method, named the Hemorrhage Evaluation and Detector System for Underserved Populations (HEADS-UP), potentially across the Mayo Clinic enterprise, and then expand it to underserved areas and to the detection of the 5 subtypes of intracranial hemorrhage (IH).

METHODS: We used the Radiological Society of North America (RSNA) dataset for IH detection. We performed 4 total iterations using Google Cloud Vertex AutoML. We trained an AutoML model with 2000 images, followed by 6000 images from both IH-positive and IH-negative classes. Pixel values were measured in Hounsfield units (HU), with a window width of 80 HU and a level of 40 HU presenting the brain window. This was followed by a more detailed image preprocessing approach that combined the pixel values from the brain, subdural, and soft tissue window-based gray-scale images into R (red)-channel, G (green)-channel, and B (blue)-channel images to boost the binary IH classification performance. Four experiments with AutoML were applied to study the effects of training sample size and image preprocessing on model performance.
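To make the multi-window preprocessing step concrete, the following is a minimal Python sketch of clipping CT pixel values in Hounsfield units to diagnostic windows and stacking them as RGB channels. The brain window (width 80 HU, level 40 HU) is taken from the abstract; the subdural and soft-tissue window settings below are illustrative assumptions rather than the authors' exact values.

```python
import numpy as np

def apply_window(hu: np.ndarray, level: float, width: float) -> np.ndarray:
    """Clip a CT slice (in Hounsfield units) to a window and scale to [0, 1]."""
    lo, hi = level - width / 2, level + width / 2
    return (np.clip(hu, lo, hi) - lo) / (hi - lo)

def windows_to_rgb(hu_slice: np.ndarray) -> np.ndarray:
    """Stack three windowed views of one CT slice into an RGB image."""
    r = apply_window(hu_slice, level=40, width=80)    # brain window (from the abstract)
    g = apply_window(hu_slice, level=80, width=200)   # subdural window (assumed setting)
    b = apply_window(hu_slice, level=40, width=380)   # soft-tissue window (assumed setting)
    return np.stack([r, g, b], axis=-1)

# Example: a synthetic 512x512 "slice" in HU becomes a 512x512x3 RGB array.
slice_hu = np.random.default_rng(0).uniform(-1000, 2000, size=(512, 512))
rgb = windows_to_rgb(slice_hu)
```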

RESULTS: Of the 4 AutoML experiments, the best-performing model came from the fourth experiment, which achieved 95.80% average precision, 91.40% precision, and 91.40% recall. On the basis of this analysis, our binary IH classifier, the hemorrhage evaluation and detector system for underserved populations, appeared accurate and performed well.

CONCLUSION: Hemorrhage evaluation and detector system for underserved populations is a rapid, cloud-based, deployable ML method to detect IH. This tool can help expedite the care of patients with IH in resource-limited hospitals.}, } @article {pmid37869808, year = {2023}, author = {Gal-Nadasan, N and Stoicu-Tivadar, V and Gal-Nadasan, E and Dinu, AR}, title = {Robotic Process Automation Based Data Extraction from Handwritten Medical Forms.}, journal = {Studies in health technology and informatics}, volume = {309}, number = {}, pages = {68-72}, doi = {10.3233/SHTI230741}, pmid = {37869808}, issn = {1879-8365}, mesh = {*Robotics ; *Robotic Surgical Procedures ; Software ; Automation ; Machine Learning ; }, abstract = {This paper proposes the creation of an RPA (robotic process automation)-based software robot that can digitalize and extract data from handwritten medical forms. The RPA robot uses a taxonomy that is specific to the medical form and associates the extracted data with the taxonomy. This is accomplished using UiPath Studio to create the robot, Google Cloud Vision OCR (optical character recognition) to create the DOM (digital object model) file, and the UiPath machine learning (ML) API to extract the data from the medical form. Because the medical form is in a non-standard format, a data extraction template had to be applied. After the extraction process, the data can be saved into databases or spreadsheets.}, } @article {pmid37867911, year = {2023}, author = {Eneh, AH and Udanor, CN and Ossai, NI and Aneke, SO and Ugwoke, PO and Obayi, AA and Ugwuishiwu, CH and Okereke, GE}, title = {Towards an improved internet of things sensors data quality for a smart aquaponics system yield prediction.}, journal = {MethodsX}, volume = {11}, number = {}, pages = {102436}, pmid = {37867911}, issn = {2215-0161}, abstract = {The mobile aquaponics system is a sustainable integrated aquaculture-crop production system in which wastewater from fish ponds is utilized in crop production, filtered, and returned for aquaculture uses. This process ensures the optimization of water and nutrients as well as the simultaneous production of fish and crops in portable homestead models. The lack of datasets and documentation on monitoring growth parameters in Sub-Saharan Africa hampers the effective management and prediction of yields. Water quality impacts the fish growth rate, feed consumption, and general well-being irrespective of the system. This research presents an improvement on the IoT water quality sensor system developed in a previous study carried out in conjunction with two local catfish farmers. The improved system produced datasets that, when trained using several machine learning algorithms, achieved a test RMSE score of 0.6140 against 1.0128 from the old system for fish length prediction using the Decision Tree Regressor. Further testing with the XGBoost Regressor achieved a test RMSE score of 7.0192 for fish weight prediction from the initial IoT dataset and 0.7793 from the improved IoT dataset. Both systems achieved a prediction accuracy of 99%.
These evaluations clearly show that the improved system outperformed the initial one.•The discovery and use of improved IoT pond water quality sensors.•Development of machine learning models to evaluate the methods.•Testing of the datasets from the two methods using the machine learning models.}, } @article {pmid37864543, year = {2023}, author = {Patel, M and Dayan, I and Fishman, EK and Flores, M and Gilbert, FJ and Guindy, M and Koay, EJ and Rosenthal, M and Roth, HR and Linguraru, MG}, title = {Accelerating artificial intelligence: How federated learning can protect privacy, facilitate collaboration, and improve outcomes.}, journal = {Health informatics journal}, volume = {29}, number = {4}, pages = {14604582231207744}, doi = {10.1177/14604582231207744}, pmid = {37864543}, issn = {1741-2811}, mesh = {Humans ; *Artificial Intelligence ; Privacy ; Learning ; *Pancreatic Neoplasms ; }, abstract = {Cross-institution collaborations are constrained by data-sharing challenges. These challenges hamper innovation, particularly in artificial intelligence, where models require diverse data to ensure strong performance. Federated learning (FL) addresses data-sharing challenges. In typical collaborations, data is sent to a central repository where models are trained. With FL, models are sent to participating sites, trained locally, and the model weights are aggregated to create a master model with improved performance. At the 2021 Radiological Society of North America (RSNA) conference, a panel was conducted titled "Accelerating AI: How Federated Learning Can Protect Privacy, Facilitate Collaboration and Improve Outcomes." Two groups shared insights: researchers from the EXAM study (EMR CXR AI Model) and members of the National Cancer Institute's Early Detection Research Network's (EDRN) pancreatic cancer working group. EXAM brought together 20 institutions to create a model to predict the oxygen requirements of patients seen in the emergency department with COVID-19 symptoms. The EDRN collaboration is focused on improving outcomes for pancreatic cancer patients through earlier detection. This paper describes major insights from the panel, including direct quotes. The panelists described the impetus for FL, the long-term potential vision of FL, challenges faced in FL, and the immediate path forward for FL.}, } @article {pmid37863925, year = {2023}, author = {Naboureh, A and Li, A and Bian, J and Lei, G and Nan, X}, title = {Land cover dataset of the China Central-Asia West-Asia Economic Corridor from 1993 to 2018.}, journal = {Scientific data}, volume = {10}, number = {1}, pages = {728}, pmid = {37863925}, issn = {2052-4463}, support = {42090015 and 41801370//National Natural Science Foundation of China (National Science Foundation of China)/ ; 42090015 and 41801370//National Natural Science Foundation of China (National Science Foundation of China)/ ; 2019365//Youth Innovation Promotion Association of the Chinese Academy of Sciences (Youth Innovation Promotion Association CAS)/ ; }, abstract = {Land Cover (LC) maps offer vital knowledge for various studies, ranging from sustainable development to climate change. The China Central-Asia West-Asia Economic Corridor region, as a core component of the Belt and Road initiative program, has been experiencing some of the most severe LC change tragedies, such as the Aral Sea crisis and Lake Urmia shrinkage, in recent decades. Therefore, there is a high demand for producing a fine-resolution, spatially-explicit, and long-term LC dataset for this region.
However, except for China, such a dataset is currently lacking for the rest of the region (Kyrgyzstan, Turkmenistan, Kazakhstan, Uzbekistan, Tajikistan, Turkey, and Iran). Here, we constructed a historical set of six 30-m resolution LC maps between 1993 and 2018 at 5-year time intervals for these seven countries, in which nearly 200,000 Landsat scenes were classified into nine LC types within the Google Earth Engine cloud computing platform. The generated LC maps displayed high accuracies. This publicly available dataset has the potential to be broadly applied in environmental policy and management.}, } @article {pmid37860633, year = {2023}, author = {Muratore, L and Tsagarakis, N}, title = {XBot2D: towards a robotics hybrid cloud architecture for field robotics.}, journal = {Frontiers in robotics and AI}, volume = {10}, number = {}, pages = {1168694}, pmid = {37860633}, issn = {2296-9144}, abstract = {Nowadays, robotics applications requiring the execution of complex tasks in real-world scenarios are still facing many challenges related to highly unstructured and dynamic environments in domains such as emergency response and search and rescue, where robots have to operate for prolonged periods, trading off computational performance against increased power autonomy and vice versa. In particular, there is a crucial need for robots capable of adapting to such settings while at the same time providing robustness and extended power autonomy. A possible approach to overcome the conflicting demands of a computationally performant system and the need for long power autonomy is represented by cloud robotics, which can boost the computational capabilities of the robot while reducing the energy consumption by offloading resources to the cloud. Nevertheless, the communication constraints due to limited bandwidth, latency, and connectivity, typical of field robotics, make cloud-enabled robotics solutions challenging to deploy in real-world applications. In this context, we designed and realized the XBot2D software architecture, which provides a hybrid cloud manager capable of dynamically and seamlessly allocating robotics skills to perform a distributed computation based on the current network condition, the required latency, and the computational/energy resources of the robot in use. The proposed framework leverages the two dimensions, i.e., 2D (local and cloud), in a way that is transparent to the user, providing support for Real-Time (RT) skill execution on the local robot, as well as machine learning and A.I. resources on the cloud, with the possibility to automatically relocate the above based on the required performance and communication quality. The XBot2D implementation and its functionalities are presented and validated in realistic tasks involving the CENTAURO robot and the Amazon Web Services Elastic Compute Cloud (AWS EC2) infrastructure with different network conditions.}, } @article {pmid37860604, year = {2023}, author = {Post, AR and Ho, N and Rasmussen, E and Post, I and Cho, A and Hofer, J and Maness, AT and Parnell, T and Nix, DA}, title = {Hypermedia-based software architecture enables Test-Driven Development.}, journal = {JAMIA open}, volume = {6}, number = {4}, pages = {ooad089}, pmid = {37860604}, issn = {2574-2531}, support = {P30 CA042014/CA/NCI NIH HHS/United States ; }, abstract = {OBJECTIVES: Using agile software development practices, develop and evaluate an architecture and implementation for reliable and user-friendly self-service management of bioinformatic data stored in the cloud.

MATERIALS AND METHODS: Comprehensive Oncology Research Environment (CORE) Browser is a new open-source web application for cancer researchers to manage sequencing data organized in a flexible format in Amazon Simple Storage Service (S3) buckets. It has a microservices- and hypermedia-based architecture, which we integrated with Test-Driven Development (TDD), the iterative writing of computable specifications for how software should work prior to development. Relying on repeating patterns found in hypermedia-based architectures, we hypothesized that hypermedia would permit developing test "templates" that can be parameterized and executed for each microservice, maximizing code coverage while minimizing effort.
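The "test template" idea lends itself to a short illustration. Below is a minimal, hypothetical pytest sketch of a parameterized hypermedia test template in the spirit described above; the base URL, resource names, and HAL-style `_links` convention are assumptions for illustration, not the CORE Browser test suite itself.

```python
import pytest
import requests

BASE_URL = "http://localhost:8080/api"          # hypothetical service root
RESOURCES = ["projects", "samples", "files"]    # assumed microservice resources

@pytest.mark.parametrize("resource", RESOURCES)
def test_collection_exposes_hypermedia_controls(resource):
    """Template: every collection resource should return HAL-style links."""
    response = requests.get(f"{BASE_URL}/{resource}")
    assert response.status_code == 200
    body = response.json()
    # The repeating hypermedia pattern: a _links object with a self link.
    assert "self" in body.get("_links", {})

@pytest.mark.parametrize("resource", RESOURCES)
def test_self_link_is_dereferenceable(resource):
    """Template: following the advertised self link reproduces the resource."""
    body = requests.get(f"{BASE_URL}/{resource}").json()
    self_url = body["_links"]["self"]["href"]
    assert requests.get(self_url).status_code == 200
```

Because every microservice repeats the same hypermedia pattern, one template like this can be parameterized across all resources, which is how a small number of templates can yield a large number of executed tests.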

RESULTS: After one-and-a-half years of development, the CORE Browser backend had 121 test templates and 875 custom tests that were parameterized and executed 3031 times, providing 78% code coverage.

DISCUSSION: Architecting to permit test reuse through a hypermedia approach was a key success factor for our testing efforts. CORE Browser's application of hypermedia and TDD illustrates one way to integrate software engineering methods into data-intensive networked applications. Separating bioinformatic data management from analysis distinguishes this platform from others in bioinformatics and may provide stable data management while permitting analysis methods to advance more rapidly.

CONCLUSION: Software engineering practices are underutilized in informatics. Similar informatics projects will more likely succeed through application of good architecture and automated testing. Our approach is broadly applicable to data management tools involving cloud data storage.}, } @article {pmid37860463, year = {2023}, author = {Healthcare Engineering, JO}, title = {Retracted: Application of Cloud Computing in the Prediction of Exercise Improvement of Cardiovascular and Digestive Systems in Obese Patients.}, journal = {Journal of healthcare engineering}, volume = {2023}, number = {}, pages = {9872648}, pmid = {37860463}, issn = {2040-2309}, abstract = {[This retracts the article DOI: 10.1155/2021/4695722.].}, } @article {pmid37860366, year = {2023}, author = {Healthcare Engineering, JO}, title = {Retracted: Medical Cloud Computing Data Processing to Optimize the Effect of Drugs.}, journal = {Journal of healthcare engineering}, volume = {2023}, number = {}, pages = {9869843}, pmid = {37860366}, issn = {2040-2309}, abstract = {[This retracts the article DOI: 10.1155/2021/5560691.].}, } @article {pmid37860340, year = {2023}, author = {Healthcare Engineering, JO}, title = {Retracted: Cloud Computing into Respiratory Rehabilitation Training-Assisted Treatment of Patients with Pneumonia.}, journal = {Journal of healthcare engineering}, volume = {2023}, number = {}, pages = {9795658}, pmid = {37860340}, issn = {2040-2309}, abstract = {[This retracts the article DOI: 10.1155/2021/5884174.].}, } @article {pmid37859937, year = {2023}, author = {Hornik, J and Rachamim, M and Graguer, S}, title = {Fog computing: a platform for big-data marketing analytics.}, journal = {Frontiers in artificial intelligence}, volume = {6}, number = {}, pages = {1242574}, pmid = {37859937}, issn = {2624-8212}, abstract = {Marketing science embraces a wider variety of data types and measurement tools necessary for strategy, research, and applied decision making. Managing the marketing data generated by internet of things (IoT) sensors and actuators is one of the biggest challenges faced by marketing managers when deploying an IoT system. This short note shows how traditional cloud-based IoT systems are challenged by the large scale, heterogeneity, and high latency witnessed in some cloud ecosystems. It introduces researchers to one recent breakthrough, fog computing, an emerging concept that decentralizes applications, strategies, and data analytics into the network itself using a distributed and federated computing model. It transforms centralized cloud to distributed fog by bringing storage and computation closer to the user end. Fog computing is considered a novel marketplace phenomenon which can support AI and management strategies, especially for the design of "smart marketing".}, } @article {pmid37856442, year = {2023}, author = {Uhlrich, SD and Falisse, A and Kidziński, Ł and Muccini, J and Ko, M and Chaudhari, AS and Hicks, JL and Delp, SL}, title = {OpenCap: Human movement dynamics from smartphone videos.}, journal = {PLoS computational biology}, volume = {19}, number = {10}, pages = {e1011462}, pmid = {37856442}, issn = {1553-7358}, support = {P41 EB027060/EB/NIBIB NIH HHS/United States ; R01 AR077604/AR/NIAMS NIH HHS/United States ; }, mesh = {Humans ; *Smartphone ; *Models, Biological ; Muscles/physiology ; Software ; Biomechanical Phenomena ; Movement/physiology ; }, abstract = {Measures of human movement dynamics can predict outcomes like injury risk or musculoskeletal disease progression. 
However, these measures are rarely quantified in large-scale research studies or clinical practice due to the prohibitive cost, time, and expertise required. Here we present and validate OpenCap, an open-source platform for computing both the kinematics (i.e., motion) and dynamics (i.e., forces) of human movement using videos captured from two or more smartphones. OpenCap leverages pose estimation algorithms to identify body landmarks from videos; deep learning and biomechanical models to estimate three-dimensional kinematics; and physics-based simulations to estimate muscle activations and musculoskeletal dynamics. OpenCap's web application enables users to collect synchronous videos and visualize movement data that is automatically processed in the cloud, thereby eliminating the need for specialized hardware, software, and expertise. We show that OpenCap accurately predicts dynamic measures, like muscle activations, joint loads, and joint moments, which can be used to screen for disease risk, evaluate intervention efficacy, assess between-group movement differences, and inform rehabilitation decisions. Additionally, we demonstrate OpenCap's practical utility through a 100-subject field study, where a clinician using OpenCap estimated musculoskeletal dynamics 25 times faster than a laboratory-based approach at less than 1% of the cost. By democratizing access to human movement analysis, OpenCap can accelerate the incorporation of biomechanical metrics into large-scale research studies, clinical trials, and clinical practice.}, } @article {pmid37854642, year = {2023}, author = {Zhang, M}, title = {Optimization Strategy of College Students' Education Management Based on Smart Cloud Platform Teaching.}, journal = {Computational intelligence and neuroscience}, volume = {2023}, number = {}, pages = {5642142}, pmid = {37854642}, issn = {1687-5273}, mesh = {Humans ; *Artificial Intelligence ; *Cloud Computing ; Students ; Big Data ; Commerce ; }, abstract = {With the passage of time and social changes, the form of education is also changing step by step. In just a few decades, information technology has developed by leaps and bounds, yet digital education has not been widely promoted. Intelligent education cloud platforms based on big data, the Internet of Things, cloud computing, and artificial intelligence have begun to emerge. Research on the "smart campus" cloud platform is conducive to improving the utilization rate of existing hardware equipment in colleges and universities and to improving the level of teaching software deployment. At the same time, this research also provides a new idea for research in the field of cloud security. While cloud computing brings convenience to teaching work, it also brings new problems to system security. At present, virtualization technology is still in the ascendant in the construction of the "smart campus" in colleges and universities and is gradually being applied to cloud computing service products. There are many cases of teaching resource platform construction, but most of them are modified from early resource management systems, which suffer from strong single-system coupling; insufficient functions for collecting, processing, searching, sharing, and reusing resources; and weak application support for related business systems.
Against this social background, this paper studies a teaching process management system for the intelligent classroom.}, } @article {pmid37853124, year = {2023}, author = {Wang, Y and Hollingsworth, PM and Zhai, D and West, CD and Green, JMH and Chen, H and Hurni, K and Su, Y and Warren-Thomas, E and Xu, J and Ahrends, A}, title = {High-resolution maps show that rubber causes substantial deforestation.}, journal = {Nature}, volume = {623}, number = {7986}, pages = {340-346}, pmid = {37853124}, issn = {1476-4687}, mesh = {Asia, Southeastern ; Biodiversity ; Cloud Computing ; *Conservation of Natural Resources/statistics & numerical data/trends ; *Forests ; *Geographic Mapping ; *Rubber ; *Satellite Imagery ; }, abstract = {Understanding the effects of cash crop expansion on natural forest is of fundamental importance. However, for most crops there are no remotely sensed global maps[1], and global deforestation impacts are estimated using models and extrapolations. Natural rubber is an example of a principal commodity for which deforestation impacts have been highly uncertain, with estimates differing more than fivefold[1-4]. Here we harnessed Earth observation satellite data and cloud computing[5] to produce high-resolution maps of rubber (10 m pixel size) and associated deforestation (30 m pixel size) for Southeast Asia. Our maps indicate that rubber-related forest loss has been substantially underestimated in policy, by the public and in recent reports[6-8]. Our direct remotely sensed observations show that deforestation for rubber is at least twofold to threefold higher than suggested by figures now widely used for setting policy[4]. With more than 4 million hectares of forest loss for rubber since 1993 (at least 2 million hectares since 2000) and more than 1 million hectares of rubber plantations established in Key Biodiversity Areas, the effects of rubber on biodiversity and ecosystem services in Southeast Asia could be extensive. Thus, rubber deserves more attention in domestic policy, within trade agreements and in incoming due-diligence legislation.}, } @article {pmid39749219, year = {2023}, author = {Giri, C and Long, J and Poudel, P}, title = {Mangrove Forest Cover Change in the Conterminous United States from 1980-2020.}, journal = {Remote sensing}, volume = {15}, number = {20}, pages = {1-16}, pmid = {39749219}, issn = {2072-4292}, support = {EPA999999/ImEPA/Intramural EPA/United States ; }, abstract = {Mangrove forests in developed and developing countries are experiencing substantial transformations driven by natural and anthropogenic factors. This study focuses on the conterminous United States, including Florida, Texas, and Louisiana, where coastal development, urbanization, hydrological pattern alterations, global warming, sea level rise, and natural disasters such as hurricanes contribute to mangrove forest changes. Using time-series Landsat data and image-processing techniques in a cloud computing platform, we analyzed the dynamics of mangrove forests every five years from 1980 to 2020. Each thematic product was independently derived using a region of interest (ROI) suitable for local conditions. The analysis was performed using consistent data sources and a unified classification methodology. Our results revealed that the total mangrove area in the conterminous United States (CONUS) in 2020 was 266,179 ha, with 98.0% of the mangrove area in Florida, 0.6% in Louisiana, and 1.4% in Texas. Approximately 85% of the CONUS mangrove area was found between 24.5° and 26.0° latitude.
Overall, mangrove forests in the CONUS increased by 13.5% from 1980 to 2020. However, the quinquennial variation in areal coverage fluctuated substantially. The validation of the 2020 map using a statistical sample of reference data confirmed a high accuracy of 95%. Our results can aid policymakers and conservationists in developing targeted strategies for preserving the ecological and socio-economic value of mangrove forests in the conterminous United States. Additionally, all the datasets generated from this study have been released to the public.}, } @article {pmid37850120, year = {2023}, author = {Teng, Z and Chen, J and Wang, J and Wu, S and Chen, R and Lin, Y and Shen, L and Jackson, R and Zhou, J and Yang, C}, title = {Panicle-Cloud: An Open and AI-Powered Cloud Computing Platform for Quantifying Rice Panicles from Drone-Collected Imagery to Enable the Classification of Yield Production in Rice.}, journal = {Plant phenomics (Washington, D.C.)}, volume = {5}, number = {}, pages = {0105}, pmid = {37850120}, issn = {2643-6515}, abstract = {Rice (Oryza sativa) is an essential staple food for many rice-consuming nations in the world; hence the importance of improving its yield production under global climate change. To evaluate different rice varieties' yield performance, key yield-related traits such as panicle number per unit area (PNpM[2]) are important indicators, which have attracted much attention from many plant research groups. Nevertheless, it is still challenging to conduct large-scale screening of rice panicles to quantify the PNpM[2] trait due to complex field conditions, a large variation of rice cultivars, and their panicle morphological features. Here, we present Panicle-Cloud, an open and artificial intelligence (AI)-powered cloud computing platform that is capable of quantifying rice panicles from drone-collected imagery. To facilitate the development of AI-powered detection models, we first established an open diverse rice panicle detection dataset that was annotated by a group of rice specialists; then, we integrated several state-of-the-art deep learning models (including a preferred model called Panicle-AI) into the Panicle-Cloud platform, so that nonexpert users could select a pretrained model to detect rice panicles from their own aerial images. We trialed the AI models with images collected at different altitudes and growth stages, through which the right timing and preferred image resolutions for phenotyping rice panicles in the field were identified. Then, we applied the platform in a 2-season rice breeding trial to validate its biological relevance and classified yield production using the platform-derived PNpM[2] trait from hundreds of rice varieties. Through correlation analysis between computational analysis and manual scoring, we found that the platform could quantify the PNpM[2] trait reliably, based on which yield production was classified with high accuracy.
Hence, we trust that our work demonstrates a valuable advance in phenotyping the PNpM[2] trait in rice, which provides a useful toolkit to enable rice breeders to screen and select desired rice varieties under field conditions.}, } @article {pmid37848896, year = {2023}, author = {Kline, JA and Reed, B and Frost, A and Alanis, N and Barshay, M and Melzer, A and Galbraith, JW and Budd, A and Winn, A and Pun, E and Camargo, CA}, title = {Database derived from an electronic medical record-based surveillance network of US emergency department patients with acute respiratory illness.}, journal = {BMC medical informatics and decision making}, volume = {23}, number = {1}, pages = {224}, pmid = {37848896}, issn = {1472-6947}, mesh = {Humans ; *Electronic Health Records ; Emergency Service, Hospital ; *Respiratory Tract Infections/diagnosis/epidemiology ; Laboratories ; Public Health ; }, abstract = {BACKGROUND: For surveillance of episodic illness, the emergency department (ED) represents one of the largest interfaces for generalizable data about segments of the US public experiencing a need for unscheduled care. This protocol manuscript describes the development and operation of a national network linking symptom, clinical, laboratory and disposition data that provides a public database dedicated to the surveillance of acute respiratory infections (ARIs) in EDs.

METHODS: The Respiratory Virus Laboratory Emergency Department Network Surveillance (RESP-LENS) network includes 26 academic investigators, from 24 sites, with 91 hospitals, and the Centers for Disease Control and Prevention (CDC) to survey viral infections. All data originate from electronic medical records (EMRs) accessed by structured query language (SQL) coding. Each Tuesday, data are imported into the standard data form for ARI visits that occurred the prior week (termed the index file); outcomes at 30 days and ED volume are also recorded. Up to 325 data fields can be populated for each case. Data are transferred from sites into an encrypted Google Cloud Platform, then programmatically checked for compliance, parsed, and aggregated into a central database housed on a second cloud platform prior to transfer to CDC.
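As an illustration of the weekly compliance-check step described above, the following is a minimal Python sketch that validates a site's index file before cloud transfer. The field names here are hypothetical placeholders; the network's actual index file defines up to 325 fields, and this is not the RESP-LENS code itself.

```python
import csv

# Assumed (hypothetical) required columns for a weekly ARI index file.
REQUIRED_FIELDS = ["site_id", "visit_date", "icd10_codes", "viral_test_result"]

def check_index_file(path: str) -> list[str]:
    """Return a list of compliance problems found in a weekly index file."""
    problems = []
    with open(path, newline="") as f:
        reader = csv.DictReader(f)
        missing = [c for c in REQUIRED_FIELDS if c not in (reader.fieldnames or [])]
        if missing:
            problems.append(f"missing columns: {missing}")
            return problems
        for i, row in enumerate(reader, start=2):  # line 1 is the header
            for field in REQUIRED_FIELDS:
                if not row[field].strip():
                    problems.append(f"line {i}: empty {field}")
    return problems

if __name__ == "__main__":
    for issue in check_index_file("index_file.csv"):
        print(issue)
```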

RESULTS: As of August 2023, the network has reported data on over 870,000 ARI cases selected from approximately 5.2 million ED encounters. Post-contracting challenges to network execution have included local shifts in testing policies and platforms, delays in ICD-10 coding to detect ARI cases, and site-level personnel turnover. The network is addressing these challenges and is poised to begin streaming weekly data for dissemination.

CONCLUSIONS: The RESP-LENS network provides a weekly updated database that is a public health resource to survey the epidemiology, viral causes, and outcomes of ED patients with acute respiratory infections.}, } @article {pmid37848573, year = {2023}, author = {Atchyuth, BAS and Swain, R and Das, P}, title = {Near real-time flood inundation and hazard mapping of Baitarani River Basin using Google Earth Engine and SAR imagery.}, journal = {Environmental monitoring and assessment}, volume = {195}, number = {11}, pages = {1331}, pmid = {37848573}, issn = {1573-2959}, mesh = {*Floods ; *Rivers ; Search Engine ; Environmental Monitoring/methods ; Water ; }, abstract = {Flood inundation mapping and satellite imagery monitoring are critical and effective responses during flood events. Mapping of a flood using optical data is limited due to the unavailability of cloud-free images. Because of its capacity to penetrate clouds and operate in all kinds of weather, synthetic aperture radar is preferred for water inundation mapping. Flood mapping in Eastern India's Baitarani River Basin for 2018, 2019, 2020, 2021, and 2022 was performed in this study using Sentinel-1 imagery and Google Earth Engine with Otsu's algorithm. Different machine-learning algorithms were used to map the LULC of the study region. Dual polarizations VH and VV and their combinations VV×VH, VV+VH, VH-VV, VV-VH, VV/VH, and VH/VV were examined to identify non-water and water bodies. The normalized difference water index (NDWI) map derived from Sentinel-2 data validated the surface water inundation with 80% accuracy. The total inundated areas were identified as 440.3 km[2] in 2018, 268.58 km[2] in 2019, 178.40 km[2] in 2020, 203.79 km[2] in 2021, and 321.33 km[2] in 2022. The overlap of flood maps on the LULC map indicated that flooding highly affected agriculture and urban areas in these years. The approach using near-real-time Sentinel-1 SAR imagery and the GEE platform can be operationalized for periodic flood mapping, helping to develop flood control measures and enhance flood management. The generated annual flood inundation maps are also useful for policy development, agriculture yield estimation, crop insurance framing, etc.}, } @article {pmid37841693, year = {2023}, author = {Familiar, AM and Mahtabfar, A and Fathi Kazerooni, A and Kiani, M and Vossough, A and Viaene, A and Storm, PB and Resnick, AC and Nabavizadeh, A}, title = {Radio-pathomic approaches in pediatric neuro-oncology: Opportunities and challenges.}, journal = {Neuro-oncology advances}, volume = {5}, number = {1}, pages = {vdad119}, pmid = {37841693}, issn = {2632-2498}, support = {75N91019D00024/CA/NCI NIH HHS/United States ; }, abstract = {With medical software platforms moving to cloud environments with scalable storage and computing, the translation of predictive artificial intelligence (AI) models to aid in clinical decision-making and facilitate personalized medicine for cancer patients is becoming a reality. Medical imaging, namely radiologic and histologic images, has immense analytical potential in neuro-oncology, and models utilizing integrated radiomic and pathomic data may yield a synergistic effect and provide a new modality for precision medicine. At the same time, the ability to harness multi-modal data is met with challenges in aggregating data across medical departments and institutions, as well as significant complexity in modeling the phenotypic and genotypic heterogeneity of pediatric brain tumors.
In this paper, we review recent pathomic and integrated pathomic, radiomic, and genomic studies with clinical applications. We discuss current challenges limiting translational research on pediatric brain tumors and outline technical and analytical solutions. Overall, we propose that to empower the potential residing in radio-pathomics, systemic changes in cross-discipline data management and end-to-end software platforms to handle multi-modal data sets are needed, in addition to embracing modern AI-powered approaches. These changes can improve the performance of predictive models, and ultimately the ability to advance brain cancer treatments and patient outcomes through the development of such models.}, } @article {pmid37840574, year = {2023}, author = {Jang, H and Park, S and Koh, H}, title = {Comprehensive microbiome causal mediation analysis using MiMed on user-friendly web interfaces.}, journal = {Biology methods & protocols}, volume = {8}, number = {1}, pages = {bpad023}, pmid = {37840574}, issn = {2396-8923}, abstract = {It is a central goal of human microbiome studies to understand the roles of the microbiome as a mediator that transmits environmental, behavioral, or medical exposures to health or disease outcomes. Yet, mediation analysis is not used as much as it should be. One reason is the lack of carefully planned routines, compilers, and automated computing systems for microbiome mediation analysis (MiMed) to perform a series of data processing, diversity calculation, data normalization, downstream data analysis, and visualizations. Many researchers in various disciplines (e.g. clinicians, public health practitioners, and biologists) are also not familiar with the related statistical methods and programming languages on command-line interfaces. Thus, in this article, we introduce a web cloud computing platform, named MiMed, that enables comprehensive MiMed on user-friendly web interfaces. The main features of MiMed are as follows. First, MiMed can survey the microbiome in various spheres (i) as a whole microbial ecosystem using different ecological measures (e.g. alpha- and beta-diversity indices) or (ii) as individual microbial taxa (e.g. phyla, classes, orders, families, genera, and species) using different data normalization methods. Second, MiMed enables covariate-adjusted analysis to control for potential confounding factors (e.g. age and gender), which is essential to enhance the causality of the results, especially for observational studies. Third, MiMed enables a breadth of statistical inferences in both mediation effect estimation and significance testing. Fourth, MiMed provides flexible and easy-to-use data processing and analytic modules and creates nice graphical representations. Finally, MiMed employs ChatGPT to search for what has been known about the microbial taxa that are found to be significant mediators, using artificial intelligence technologies. For demonstration purposes, we applied MiMed to the study on the mediating roles of oral microbiome in subgingival niches between e-cigarette smoking and gingival inflammation.
MiMed is freely available on our web server (http://mimed.micloud.kr).}, } @article {pmid37838111, year = {2024}, author = {Li, W and Li, SM and Kang, MC and Xiong, X and Wang, P and Tao, LQ}, title = {Multi-characteristic tannic acid-reinforced polyacrylamide/sodium carboxymethyl cellulose ionic hydrogel strain sensor for human-machine interaction.}, journal = {International journal of biological macromolecules}, volume = {254}, number = {Pt 2}, pages = {127434}, doi = {10.1016/j.ijbiomac.2023.127434}, pmid = {37838111}, issn = {1879-0003}, mesh = {Humans ; *Carboxymethylcellulose Sodium ; Ions ; *Hydrogels ; Electric Conductivity ; }, abstract = {Big data and cloud computing are propelling research on human-computer interfaces within academia. However, the potential of wearable human-machine interaction (HMI) devices utilizing multiperformance ionic hydrogels remains largely unexplored. Here, we present a motion recognition-based HMI system that enhances movement training. We engineered dual-network PAM/CMC/TA (PCT) hydrogels by reinforcing polyacrylamide (PAM) and sodium carboxymethyl cellulose (CMC) polymers with tannic acid (TA). These hydrogels possess exceptional transparency, adhesion, and remodelling features. By combining an elastic PAM backbone with tunable amounts of CMC and TA, the PCT hydrogels achieve optimal electromechanical performance. As strain sensors, they demonstrate high sensitivity (GF = 4.03), a low detection limit (0.5%), and good linearity (0.997). Furthermore, we developed a highly accurate (97.85%) motion recognition system using machine learning and hydrogel-based wearable sensors. This system enables contactless real-time training monitoring and wireless control of trolley operations. Our research underscores the effectiveness of PCT hydrogels for real-time HMI, thus advancing next-generation HMI systems.}, } @article {pmid37837127, year = {2023}, author = {Al-Bazzaz, H and Azam, M and Amayri, M and Bouguila, N}, title = {Unsupervised Mixture Models on the Edge for Smart Energy Consumption Segmentation with Feature Saliency.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {19}, pages = {}, pmid = {37837127}, issn = {1424-8220}, abstract = {Smart meter datasets have recently transitioned from monthly intervals to one-second granularity, yielding invaluable insights for diverse metering functions. Clustering analysis, a fundamental data mining technique, is extensively applied to discern unique energy consumption patterns. However, the advent of high-resolution smart meter data brings forth formidable challenges, including non-Gaussian data distributions, unknown cluster counts, and varying feature importance within high-dimensional spaces. This article introduces an innovative learning framework integrating the expectation-maximization algorithm with the minimum message length criterion. This unified approach enables concurrent feature and model selection, finely tuned for the proposed bounded asymmetric generalized Gaussian mixture model with feature saliency. Our experiments aim to replicate an efficient smart meter data analysis scenario by incorporating three distinct feature extraction methods. We rigorously validate the clustering efficacy of our proposed algorithm against several state-of-the-art approaches, employing diverse performance metrics across synthetic and real smart meter datasets.
The clusters that we identify effectively highlight variations in residential energy consumption, furnishing utility companies with actionable insights for targeted demand reduction efforts. Moreover, we demonstrate our method's robustness and real-world applicability by harnessing Concordia's High-Performance Computing infrastructure. This facilitates efficient energy pattern characterization, particularly within smart meter environments involving edge cloud computing. Finally, we emphasize that our proposed mixture model outperforms three other models in this paper's comparative study. We achieve superior performance compared to the non-bounded variant of the proposed mixture model by an average percentage improvement of 7.828%.}, } @article {pmid37832430, year = {2023}, author = {Schacherer, DP and Herrmann, MD and Clunie, DA and Höfener, H and Clifford, W and Longabaugh, WJR and Pieper, S and Kikinis, R and Fedorov, A and Homeyer, A}, title = {The NCI Imaging Data Commons as a platform for reproducible research in computational pathology.}, journal = {Computer methods and programs in biomedicine}, volume = {242}, number = {}, pages = {107839}, pmid = {37832430}, issn = {1872-7565}, support = {HHSN261201500003C/CA/NCI NIH HHS/United States ; HHSN261201500003I/CA/NCI NIH HHS/United States ; }, mesh = {Humans ; *Software ; Reproducibility of Results ; Cloud Computing ; Diagnostic Imaging ; *Lung Neoplasms/diagnostic imaging ; }, abstract = {BACKGROUND AND OBJECTIVES: Reproducibility is a major challenge in developing machine learning (ML)-based solutions in computational pathology (CompPath). The NCI Imaging Data Commons (IDC) provides >120 cancer image collections according to the FAIR principles and is designed to be used with cloud ML services. Here, we explore its potential to facilitate reproducibility in CompPath research.

METHODS: Using the IDC, we implemented two experiments in which a representative ML-based method for classifying lung tumor tissue was trained and/or evaluated on different datasets. To assess reproducibility, the experiments were run multiple times with separate but identically configured instances of common ML services.
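The repeated-run design can be illustrated with a short sketch. The following Python example, using synthetic data in place of the IDC lung-tissue images, executes the same experiment several times under an identical configuration and reports the spread of AUC values; the unseeded stochastic optimizer stands in for the residual nondeterminism of real cloud training pipelines.

```python
import numpy as np
from sklearn.linear_model import SGDClassifier
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import train_test_split

# Synthetic stand-in data: 1000 samples, 20 features, noisy binary labels.
rng = np.random.default_rng(0)
X = rng.normal(size=(1000, 20))
y = (X[:, 0] + 0.5 * rng.normal(size=1000) > 0).astype(int)
X_tr, X_te, y_tr, y_te = train_test_split(X, y, test_size=0.3, random_state=42)

aucs = []
for run in range(5):  # five separate but identically configured runs
    # Leaving the stochastic optimizer unseeded mimics the small
    # nondeterminism that can remain across identical environments.
    clf = SGDClassifier(loss="log_loss", max_iter=1000).fit(X_tr, y_tr)
    aucs.append(roc_auc_score(y_te, clf.predict_proba(X_te)[:, 1]))

print("AUCs:", [round(a, 4) for a in aucs])
print(f"Spread across runs: {max(aucs) - min(aucs):.6f}")
```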

RESULTS: The results of different runs of the same experiment were reproducible to a large extent. However, we observed occasional, small variations in AUC values, indicating a practical limit to reproducibility.

CONCLUSIONS: We conclude that the IDC facilitates approaching the reproducibility limit of CompPath research (i) by enabling researchers to reuse exactly the same datasets and (ii) by integrating with cloud ML services so that experiments can be run in identically configured computing environments.}, } @article {pmid37831665, year = {2023}, author = {Saif, Y and Yusof, Y and Rus, AZM and Ghaleb, AM and Mejjaouli, S and Al-Alimi, S and Didane, DH and Latif, K and Abdul Kadir, AZ and Alshalabi, H and Sadeq, S}, title = {Implementing circularity measurements in industry 4.0-based manufacturing metrology using MQTT protocol and Open CV: A case study.}, journal = {PloS one}, volume = {18}, number = {10}, pages = {e0292814}, pmid = {37831665}, issn = {1932-6203}, mesh = {*Commerce ; *Industry ; Algorithms ; Cloud Computing ; Communication ; }, abstract = {In the context of Industry 4.0, manufacturing metrology is crucial for inspecting and measuring machines. The Internet of Things (IoT) technology enables seamless communication between advanced industrial devices through local and cloud computing servers. This study investigates the use of the MQTT protocol to enhance the performance of circularity measurement data transmission between cloud servers and round-hole data sources through OpenCV. Accurate inspection of circular characteristics, particularly roundness errors, is vital for lubricant distribution, assemblies, and rotational force innovation. Circularity measurement techniques employ algorithms like the minimal zone circle tolerance algorithm. Vision inspection systems, utilizing image processing techniques, can promptly and accurately detect quality concerns by analyzing the model's surface through circular dimension analysis. This involves sending the model's image to a computer, which employs techniques such as Hough Transform, Edge Detection, and Contour Analysis to identify circular features and extract relevant parameters. This method is utilized in the camera industry and component assembly. To assess the performance, a comparative experiment was conducted between the non-contact-based 3SMVI system and the contact-based CMM system widely used in various industries for roundness evaluation. The CMM technique is known for its high precision but is time-consuming. Experimental results indicated a variation of 5 to 9.6 micrometers between the two methods.
It is suggested that using a high-resolution camera and appropriate lighting conditions can further enhance result precision.}, } @article {pmid37829921, year = {2023}, author = {Intelligence And Neuroscience, C}, title = {Retracted: An Optimized Decision Method for Smart Teaching Effect Based on Cloud Computing and Deep Learning.}, journal = {Computational intelligence and neuroscience}, volume = {2023}, number = {}, pages = {9862737}, pmid = {37829921}, issn = {1687-5273}, abstract = {[This retracts the article DOI: 10.1155/2022/6907172.].}, } @article {pmid37829877, year = {2023}, author = {Intelligence And Neuroscience, C}, title = {Retracted: The Construction of Big Data Computational Intelligence System for E-Government in Cloud Computing Environment and Its Development Impact.}, journal = {Computational intelligence and neuroscience}, volume = {2023}, number = {}, pages = {9873976}, pmid = {37829877}, issn = {1687-5273}, abstract = {[This retracts the article DOI: 10.1155/2022/7295060.].}, } @article {pmid37829372, year = {2023}, author = {Healthcare Engineering, JO}, title = {Retracted: Construction of a Health Management Model for Early Identification of Ischaemic Stroke in Cloud Computing.}, journal = {Journal of healthcare engineering}, volume = {2023}, number = {}, pages = {9820647}, pmid = {37829372}, issn = {2040-2309}, abstract = {[This retracts the article DOI: 10.1155/2022/1018056.].}, } @article {pmid37819909, year = {2023}, author = {Wang, TY and Cui, J and Fan, Y}, title = {A wearable-based sports health monitoring system using CNN and LSTM with self-attentions.}, journal = {PloS one}, volume = {18}, number = {10}, pages = {e0292012}, pmid = {37819909}, issn = {1932-6203}, mesh = {Humans ; Athletes ; *Athletic Performance ; *Cell Phone ; Neural Networks, Computer ; *Wearable Electronic Devices ; }, abstract = {Sports performance and health monitoring are essential for athletes to maintain peak performance and avoid potential injuries. In this paper, we propose a sports health monitoring system that utilizes wearable devices, cloud computing, and deep learning to monitor the health status of sports persons. The system consists of a wearable device that collects various physiological parameters and a cloud server that contains a deep learning model to predict the sportsperson's health status. The proposed model combines a Convolutional Neural Network (CNN), Long Short-Term Memory (LSTM), and self-attention mechanisms. The model is trained on a large dataset of sports persons' physiological data and achieves an accuracy of 93%, specificity of 94%, precision of 95%, and an F1 score of 92%. 
The sportsperson can access the cloud server using their mobile phone to receive a report of their health status, which can be used to monitor their performance and make any necessary adjustments to their training or competition schedule.}, } @article {pmid37819832, year = {2023}, author = {Ruiz-Zafra, A and Precioso, D and Salvador, B and Lubian-Lopez, SP and Jimenez, J and Benavente-Fernandez, I and Pigueiras, J and Gomez-Ullate, D and Gontard, LC}, title = {NeoCam: An Edge-Cloud Platform for Non-Invasive Real-Time Monitoring in Neonatal Intensive Care Units.}, journal = {IEEE journal of biomedical and health informatics}, volume = {27}, number = {6}, pages = {2614-2624}, doi = {10.1109/JBHI.2023.3240245}, pmid = {37819832}, issn = {2168-2208}, mesh = {Infant, Newborn ; Infant ; Humans ; *Intensive Care Units, Neonatal ; *Cloud Computing ; Infant, Premature ; Software ; Algorithms ; }, abstract = {In this work we introduce NeoCam, an open source hardware-software platform for video-based monitoring of preterm infants in Neonatal Intensive Care Units (NICUs). NeoCam includes an edge computing device that performs video acquisition and processing in real-time. Compared to other proposed solutions, it has the advantage of handling data more efficiently by performing most of the processing on the device, including proper anonymisation for better compliance with privacy regulations. In addition, it allows various video analysis tasks of clinical interest to be performed in parallel at speeds of between 20 and 30 frames per second. We introduce algorithms to measure, without contact, the breathing rate, motor activity, body pose and emotional status of the infants. For breathing rate, our system shows good agreement with existing methods provided there is sufficient light and proper imaging conditions. Models for motor activity and stress detection are new to the best of our knowledge. NeoCam has been tested on preterm infants in the NICU of the University Hospital Puerta del Mar (Cádiz, Spain), and we report the lessons learned from this trial.}, } @article {pmid37819321, year = {2023}, author = {Machado, IA and Lacerda, MAS and Martinez-Blanco, MDR and Serrano, A and García-Baonza, R and Ortiz-Rodriguez, JM}, title = {Chameleon: a cloud computing Industry 4.0 neutron spectrum unfolding code.}, journal = {Radiation protection dosimetry}, volume = {199}, number = {15-16}, pages = {1877-1882}, doi = {10.1093/rpd/ncac298}, pmid = {37819321}, issn = {1742-3406}, support = {APQ-01018-21//Fundação de Amparo à Pesquisa do Estado de Minas Gerais/ ; //Conselho Nacional de Desenvolvimento Científico e Tecnológico/ ; //OMADS Co./ ; }, mesh = {*Cloud Computing ; *Algorithms ; Neural Networks, Computer ; Internet ; Neutrons ; }, abstract = {This work presents Chameleon, a cloud computing (CC) Industry 4.0 (I4) neutron spectrum unfolding code. The code was designed in the Python programming language using the Streamlit® framework, and it is executed in the cloud, as an I4 CC technology, through the internet, using mobile devices with internet connectivity and a web browser. In its first version, as a proof of concept, the SPUNIT algorithm was implemented. The main functionalities and the preliminary tests performed to validate the code are presented. Chameleon solves the neutron spectrum unfolding problem and is easy, friendly, and intuitive to use. It can be applied successfully in various workplaces. More validation tests are in progress.
Future implementations will include improving the graphical user interface, inserting other algorithms, such as GRAVEL, MAXED and neural networks, and implementing an algorithm to estimate uncertainties in the calculated integral quantities.}, } @article {pmid37816030, year = {2023}, author = {, }, title = {Retraction: Relationship between employees' career maturity and career planning of edge computing and cloud collaboration from the perspective of organizational behavior.}, journal = {PloS one}, volume = {18}, number = {10}, pages = {e0292209}, pmid = {37816030}, issn = {1932-6203}, } @article {pmid37809681, year = {2023}, author = {Chen, C and Yang, X and Jiang, S and Liu, Z}, title = {Mapping and spatiotemporal dynamics of land-use and land-cover change based on the Google Earth Engine cloud platform from Landsat imagery: A case study of Zhoushan Island, China.}, journal = {Heliyon}, volume = {9}, number = {9}, pages = {e19654}, pmid = {37809681}, issn = {2405-8440}, abstract = {Land resources are an essential foundation for socioeconomic development. Island land resources are limited, the type changes are particularly frequent, and the environment is fragile. Therefore, large-scale, long-term, and high-accuracy land-use classification and spatiotemporal characteristic analysis are of great significance for the sustainable development of islands. Based on the advantages of remote sensing indices and principal component analysis in accurate classification, and taking Zhoushan Archipelago, China, as the study area, in this work long-term satellite remote sensing data were used to perform land-use classification and spatiotemporal characteristic analysis. The classification results showed that the land-use types could be exactly classified, with the overall accuracy and Kappa coefficient greater than 94% and 0.93, respectively. The results of the spatiotemporal characteristic analysis showed that the built-up land and forest land areas increased by 90.00 km[2] and 36.83 km[2], respectively, while the area of the cropland/grassland decreased by 69.77 km[2]. The areas of the water bodies, tidal flats, and bare land exhibited slight change trends. The spatial coverage of Zhoushan Island continuously expanded toward the coast, encroaching on nearby sea areas and tidal flats. The cropland/grassland was the most transferred-out area, at up to 108.94 km[2], and built-up land was the most transferred-in area, at up to 73.31 km[2]. This study provides a data basis and technical support for the scientific management of land resources.}, } @article {pmid37804778, year = {2023}, author = {Lakhan, A and Mohammed, MA and Abdulkareem, KH and Hamouda, H and Alyahya, S}, title = {Autism Spectrum Disorder detection framework for children based on federated learning integrated CNN-LSTM.}, journal = {Computers in biology and medicine}, volume = {166}, number = {}, pages = {107539}, doi = {10.1016/j.compbiomed.2023.107539}, pmid = {37804778}, issn = {1879-0534}, mesh = {Humans ; *Autism Spectrum Disorder/diagnosis ; Child ; Child, Preschool ; Male ; Female ; Neural Networks, Computer ; Machine Learning ; }, abstract = {The incidence of Autism Spectrum Disorder (ASD) among children, attributed to genetics and environmental factors, has been increasing daily. ASD is a non-curable neurodevelopmental disorder that affects children's communication, behavior, social interaction, and learning skills.
While machine learning has been employed for ASD detection in children, existing ASD frameworks offer limited services to monitor and improve the health of ASD patients. This paper presents an efficient ASD framework with comprehensive services that improves on existing ASD frameworks. Our proposed approach is the Federated Learning-enabled CNN-LSTM (FCNN-LSTM) scheme, designed for ASD detection in children using multimodal datasets. The ASD framework is built in a distributed computing environment where different ASD laboratories are connected to the central hospital. The FCNN-LSTM scheme enables local laboratories to train and validate different datasets, including the Ages and Stages Questionnaires (ASQ), Communication and Symbolic Behavior Scales (CSBS), Parents' Evaluation of Developmental Status (PEDS), Modified Checklist for Autism in Toddlers (M-CHAT), and Screening Tool for Autism in Toddlers and Children (STAT) datasets, across different computing laboratories. To ensure the security of patient data, we have implemented a security mechanism based on the Advanced Encryption Standard (AES) within the federated learning environment. This mechanism allows all laboratories to offload and download data securely. We integrate all trained datasets into the aggregated nodes and make the final decision for ASD patients based on the decision process tree. Additionally, we have designed various Internet of Things (IoT) applications to support ASD patients and achieve better learning results. Simulation results demonstrate that our proposed framework achieves an ASD detection accuracy of approximately 99%, outperforming all existing ASD frameworks.}, } @article {pmid37794709, year = {2024}, author = {Lee, J and Kim, H and Kron, F}, title = {Virtual education strategies in the context of sustainable health care and medical education: A topic modelling analysis of four decades of research.}, journal = {Medical education}, volume = {58}, number = {1}, pages = {47-62}, doi = {10.1111/medu.15202}, pmid = {37794709}, issn = {1365-2923}, support = {NRF-2021R1F1A1056465//Ministry of Science & ICT/ ; }, mesh = {Humans ; Artificial Intelligence ; *Education, Medical ; Delivery of Health Care ; Learning ; *Virtual Reality ; }, abstract = {BACKGROUND: The growing importance of sustainability has led to the current literature being saturated with studies on the necessity of, and suggested topics for, education for sustainable health care (ESH). Even so, ESH implementation has been hindered by educator unpreparedness and resource scarcity. A potential resolution lies in virtual education. However, research on the strategies needed for successfully implementing virtual education in the context of sustainable health care and medical education is sparse; this study aims to fill the gap.
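As an aside on mechanics: the federated setup in the FCNN-LSTM framework above ultimately reduces to aggregating locally trained model weights. A minimal federated-averaging sketch in Python follows; the layer shapes, site sizes, and helper name are illustrative assumptions, not details from the paper.

    # Minimal federated-averaging (FedAvg-style) sketch; illustrative only.
    import numpy as np

    def federated_average(site_weights, site_sizes):
        """Average per-layer weights from several sites, weighted by data size."""
        total = sum(site_sizes)
        n_layers = len(site_weights[0])
        return [
            sum(w[k] * (n / total) for w, n in zip(site_weights, site_sizes))
            for k in range(n_layers)
        ]

    # Three hypothetical laboratories, each holding a two-layer model.
    rng = np.random.default_rng(0)
    labs = [[rng.normal(size=(4, 4)), rng.normal(size=4)] for _ in range(3)]
    global_model = federated_average(labs, site_sizes=[120, 80, 200])
    print([w.shape for w in global_model])  # [(4, 4), (4,)]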

METHODS: Topic modelling, a computational text-mining method for analysing recurring patterns of co-occurring word clusters to reveal key topics prevalent across the texts, was used to examine how sustainability was addressed in research in medicine, medical education, and virtual education. A total of 17 631 studies, retrieved from Web of Science, Scopus and PubMed, were analysed.
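A minimal sketch of the kind of topic-modelling step this methods paragraph describes, using latent Dirichlet allocation from scikit-learn; the toy corpus and parameter choices are assumptions for illustration, not the study's configuration.

    # Toy LDA topic model over a three-document corpus; illustrative only.
    from sklearn.feature_extraction.text import CountVectorizer
    from sklearn.decomposition import LatentDirichletAllocation

    docs = [
        "sustainable health care education and curriculum design",
        "virtual reality simulation for medical training",
        "cloud computing and distance learning platforms",
    ]
    vec = CountVectorizer(stop_words="english")
    X = vec.fit_transform(docs)                       # document-term matrix
    lda = LatentDirichletAllocation(n_components=2, random_state=0).fit(X)
    vocab = vec.get_feature_names_out()
    for k, weights in enumerate(lda.components_):
        top = weights.argsort()[-3:][::-1]            # three strongest words
        print(f"topic {k}:", [vocab[i] for i in top])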

RESULTS: Sustainability-related topics within health care, medical education and virtual education provided systematic implications for Sustainable Virtual Medical Education (SVME), that is, ESH delivered via virtual platforms in a sustainable way. Analyses of keywords, phrases, topics and their associated networks indicate that SVME should address the three pillars of environmental, social and economic sustainability and the medical practices that uphold them; employ different technologies and methods, including simulations, virtual reality (VR), artificial intelligence (AI), cloud computing and distance learning; and implement strategies for collaborative development, persuasive diffusion and quality assurance.

CONCLUSIONS: This research suggests that sustainable strategies in virtual education for ESH require a systems approach, encompassing components such as learning content and objectives, evaluation, targeted learners, media, methods and strategies. The advancement of SVME necessitates that medical educators and researchers play a central and bridging role, guiding both the fields of sustainable health care and medical education in the development and implementation of SVME. In this way, they can prepare future physicians to address sustainability issues that impact patient care.}, } @article {pmid37773456, year = {2025}, author = {Buyukcavus, MH and Aydogan Akgun, F and Solak, S and Ucar, MHB and Fındık, Y and Baykul, T}, title = {Facial recognition by cloud-based APIs following surgically assisted rapid maxillary expansion.}, journal = {Journal of orofacial orthopedics = Fortschritte der Kieferorthopadie : Organ/official journal Deutsche Gesellschaft fur Kieferorthopadie}, volume = {86}, number = {2}, pages = {98-107}, pmid = {37773456}, issn = {1615-6714}, mesh = {Humans ; Male ; Female ; *Palatal Expansion Technique ; *Automated Facial Recognition/methods ; *Cloud Computing ; Treatment Outcome ; Reproducibility of Results ; Sensitivity and Specificity ; Adolescent ; *Face ; Adult ; Young Adult ; Photography/methods ; Cephalometry/methods ; }, abstract = {INTRODUCTION: This study aimed to investigate whether the facial soft tissue changes of individuals who had undergone surgically assisted rapid maxillary expansion (SARME) would be detected by three different well-known facial biometric recognition applications.

METHODS: To calculate similarity scores, the pre- and postsurgical photographs of 22 patients who had undergone SARME treatment were examined using three prominent cloud computing-based facial recognition application programming interfaces (APIs): AWS Rekognition (Amazon Web Services, Seattle, WA, USA), Microsoft Azure Cognitive (Microsoft, Redmond, WA, USA), and Face++ (Megvii, Beijing, China). The pre- and post-SARME photographs of the patients (relaxed, smiling, profile, and semiprofile) were used to calculate similarity scores using the APIs. Friedman's two-way analysis of variance and the Wilcoxon signed-rank test were used to compare the similarity scores obtained from the photographs of the different aspects of the face before and after surgery using the different programs. The relationship between measurements on lateral and posteroanterior cephalograms and the similarity scores was evaluated using the Spearman rank correlation.
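The statistical comparison just described can be sketched with SciPy as follows; the scores are fabricated placeholders standing in for per-patient similarity scores from the three services.

    # Friedman test across three paired score sets, with a pairwise
    # Wilcoxon signed-rank follow-up; the data are fabricated, not the study's.
    import numpy as np
    from scipy import stats

    rng = np.random.default_rng(1)
    aws = rng.uniform(80, 100, size=22)        # 22 patients, one score each
    azure = aws - rng.uniform(0, 5, size=22)
    facepp = aws - rng.uniform(5, 15, size=22)

    print(stats.friedmanchisquare(aws, azure, facepp))
    print(stats.wilcoxon(aws, facepp))         # pairwise follow-up comparison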

RESULTS: The similarity scores were lower with the Face++ program. Across photo types, the similarity scores were higher in the smiling photographs. A statistically significant difference in the similarity scores (P < 0.05) was found between the relaxed and smiling photographs using the different programs. The correlation between the cephalometric and posteroanterior measurements and the similarity scores was not significant (P > 0.05).

CONCLUSION: SARME treatment caused a significant change in the similarity scores calculated with the help of three different facial recognition programs. The highest similarity scores were found in the smiling photographs, whereas the lowest scores were found in the profile photographs.}, } @article {pmid37766066, year = {2023}, author = {Mangalampalli, S and Karri, GR and Gupta, A and Chakrabarti, T and Nallamala, SH and Chakrabarti, P and Unhelkar, B and Margala, M}, title = {Fault-Tolerant Trust-Based Task Scheduling Algorithm Using Harris Hawks Optimization in Cloud Computing.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {18}, pages = {}, pmid = {37766066}, issn = {1424-8220}, abstract = {Cloud computing is a distributed computing model which renders services to cloud users around the world. These services need to be rendered to customers with high availability and fault tolerance, but single-point failures can still occur in the cloud paradigm, and one challenge for cloud providers is to schedule tasks effectively so that failures are avoided and users trust their cloud services. This research proposes a fault-tolerant trust-based task scheduling algorithm in which we carefully schedule tasks onto suitable virtual machines by calculating priorities for tasks and VMs. Harris hawks optimization was used as the methodology to design our scheduler. We used CloudSim as the simulation tool for the entire experiment. For the simulation, we used synthetic data with different distributions and real-time supercomputer worklogs. Finally, we evaluated the proposed approach (FTTATS) against state-of-the-art approaches, i.e., ACO, PSO, and GA. From the simulation results, our proposed FTTATS reduces the makespan relative to the ACO, PSO and GA algorithms by 24.3%, 33.31%, and 29.03%, respectively. The failure rates relative to ACO, PSO, and GA were reduced by 65.31%, 65.4%, and 60.44%, respectively. Trust-based SLA parameters also improved: availability improved over ACO, PSO, and GA by 33.38%, 35.71%, and 28.24%, respectively; the success rate improved by 52.69%, 39.41%, and 38.45%, respectively; and turnaround efficiency improved by 51.8%, 47.2%, and 33.6%, respectively.}, } @article {pmid37765972, year = {2023}, author = {Emish, M and Kelani, Z and Hassani, M and Young, SD}, title = {A Mobile Health Application Using Geolocation for Behavioral Activity Tracking.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {18}, pages = {}, pmid = {37765972}, issn = {1424-8220}, support = {R01 MD018548/MD/NIMHD NIH HHS/United States ; R34 DA054511/DA/NIDA NIH HHS/United States ; n/a/DA/NIDA NIH HHS/United States ; n/a/AT/NCCIH NIH HHS/United States ; }, mesh = {*Mobile Applications ; Smartphone ; Advertising ; Algorithms ; *Blockchain ; }, abstract = {The increasing popularity of mHealth presents an opportunity for collecting rich datasets using mobile phone applications (apps). Our health-monitoring mobile application uses motion detection to track an individual's physical activity and location. The data collected are used to improve health outcomes, such as reducing the risk of chronic diseases and promoting healthier lifestyles, through analyzing physical activity patterns. Using smartphone motion detection sensors and GPS receivers, we implemented an energy-efficient tracking algorithm that captures user locations whenever they are in motion.
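A minimal sketch of such motion-gated location sampling; the sensor-reading helpers and threshold are hypothetical stand-ins for platform APIs, not the app's actual implementation.

    # Motion-gated GPS sampling sketch: poll a cheap accelerometer and only
    # request a (costly) GPS fix while the user is moving. Hypothetical stubs.
    import time

    MOVING_THRESHOLD = 1.5  # m/s^2 above baseline; assumed value

    def read_accel_magnitude():
        return 0.0          # placeholder for a real accelerometer reading

    def read_gps():
        return (0.0, 0.0)   # placeholder for a real GPS fix

    def track(duration_s=60, poll_s=5):
        fixes = []
        for _ in range(int(duration_s / poll_s)):
            if read_accel_magnitude() > MOVING_THRESHOLD:
                fixes.append((time.time(), read_gps()))  # sample only in motion
            time.sleep(poll_s)  # cheap sensor poll instead of a constant GPS fix
        return fixes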
To ensure security and efficiency in data collection and storage, encryption algorithms are used together with a serverless, scalable cloud storage design. The database schema is designed around the Mobile Advertising ID (MAID) as a unique identifier for each device, allowing for accurate tracking and high data quality. Our application uses Google's Activity Recognition Application Programming Interface (API) on Android OS, or geofencing and motion sensors on iOS, enabling it to track most available smartphones. In addition, our app leverages blockchain and traditional payments to streamline compensation and has an intuitive user interface to encourage participation in research. The mobile tracking app was tested for 20 days on an iPhone 14 Pro Max, finding that it accurately captured location during movement and promptly resumed tracking after inactivity periods, while consuming a low percentage of battery life when running in the background.}, } @article {pmid37765912, year = {2023}, author = {Lilhore, UK and Manoharan, P and Simaiya, S and Alroobaea, R and Alsafyani, M and Baqasah, AM and Dalal, S and Sharma, A and Raahemifar, K}, title = {HIDM: Hybrid Intrusion Detection Model for Industry 4.0 Networks Using an Optimized CNN-LSTM with Transfer Learning.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {18}, pages = {}, pmid = {37765912}, issn = {1424-8220}, abstract = {Industrial automation systems are undergoing a revolutionary change with the use of Internet-connected operating equipment and the adoption of cutting-edge technologies such as AI, IoT, cloud computing, and deep learning within business organizations. These innovative solutions are facilitating Industry 4.0; however, these technological advances also introduce unique security challenges whose consequences need to be identified. This research presents a hybrid intrusion detection model (HIDM) that uses OCNN-LSTM and transfer learning (TL) for Industry 4.0. The proposed model utilizes a CNN whose parameters are optimized via the grey wolf optimizer (GWO), which fine-tunes the CNN and helps to improve the model's prediction accuracy. Transfer learning enhances the training process by carrying the knowledge acquired by the OCNN-LSTM model into each subsequent cycle, which helps to improve detection accuracy. To measure the performance of the proposed model, we conducted a multi-class classification analysis on various online industrial IDS datasets, i.e., ToN-IoT and UNSW-NB15. We conducted two experiments for these two datasets, and various performance-measuring parameters, i.e., precision, F-measure, recall, accuracy, and detection rate, were calculated for the OCNN-LSTM model with and without TL and also for the CNN and LSTM models.
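For orientation, a minimal CNN-LSTM hybrid of the general kind HIDM builds on can be sketched with the Keras API; layer sizes and the class count are assumptions, and the paper's GWO-based parameter tuning is not reproduced here.

    # Minimal 1-D CNN followed by an LSTM for windowed network traffic;
    # shapes and the five-class head are illustrative assumptions.
    import tensorflow as tf
    from tensorflow.keras import layers

    model = tf.keras.Sequential([
        layers.Input(shape=(64, 10)),           # 64 time steps, 10 features
        layers.Conv1D(32, 3, activation="relu"),
        layers.MaxPooling1D(2),
        layers.LSTM(64),                        # temporal features after conv
        layers.Dense(5, activation="softmax"),  # 5 attack classes (assumed)
    ])
    model.compile(optimizer="adam",
                  loss="sparse_categorical_crossentropy",
                  metrics=["accuracy"])
    model.summary()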
For the ToN-IoT dataset, the OCNN-LSTM with TL model achieved a precision of 92.7%; for the UNSW-NB15 dataset, the precision was 94.25%, higher than that of the OCNN-LSTM without TL.}, } @article {pmid37765893, year = {2023}, author = {Li, M and Zhang, J and Lin, J and Chen, Z and Zheng, X}, title = {FireFace: Leveraging Internal Function Features for Configuration of Functions on Serverless Edge Platforms.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {18}, pages = {}, pmid = {37765893}, issn = {1424-8220}, support = {62072108//the National Natural Science Foundation of China/ ; 83021094//the Funds for Scientific Research of Fujian Provincial Department of Finance/ ; }, abstract = {The emerging serverless computing paradigm has become captivating for deploying cloud applications, alleviating developers' concerns about infrastructure resource management by requiring only the configuration of necessary parameters such as latency and memory constraints. Existing resource configuration solutions for cloud-based serverless applications can be broadly classified into modeling based on historical data or a combination of sparse measurements and interpolation/modeling. In pursuit of faster service response and conservation of network bandwidth, platforms have progressively expanded from the traditional cloud to the edge. Compared to cloud platforms, serverless edge platforms often incur more running overhead due to their limited resources, resulting in undesirable financial costs for developers when using the existing solutions. Meanwhile, it is extremely challenging to handle the heterogeneity of edge platforms, which are characterized by distinct pricing owing to their varying resource preferences. To tackle these challenges, we propose an adaptive and efficient approach called FireFace, consisting of prediction and decision modules. The prediction module extracts the internal features of all functions within the serverless application and uses this information to predict the execution time of the functions under specific configuration schemes. Based on the prediction module, the decision module analyzes the environment information and uses the Adaptive Particle Swarm Optimization algorithm with Genetic Algorithm Operator (APSO-GA) to select the most suitable configuration plan for each function, including CPU, memory, and edge platform. In this way, it is possible to effectively minimize the financial overhead while fulfilling the Service Level Objectives (SLOs). Extensive experimental results show that our prediction model obtains optimal results under all three metrics, and the prediction error rate for real-world serverless applications is in the range of 4.25∼9.51%. Our approach can find the optimal resource configuration scheme for each application, saving 7.2∼44.8% on average compared to other classic algorithms. Moreover, FireFace exhibits rapid adaptability, efficiently adjusting resource allocation schemes in response to dynamic environments.}, } @article {pmid37765859, year = {2023}, author = {Yang, D and Liu, Z and Wei, S}, title = {Interactive Learning for Network Anomaly Monitoring and Detection with Human Guidance in the Loop.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {18}, pages = {}, pmid = {37765859}, issn = {1424-8220}, abstract = {With the advancement of big data and cloud computing technology, we have witnessed tremendous developments in applying intelligent techniques in network operation and management.
However, learning- and data-based solutions for network operation and maintenance cannot, on their own, effectively adapt to the dynamic security situation or satisfy administrators' expectations. Anomaly detection of time-series monitoring indicators has been a major challenge for network administrative personnel. Monitored indicators in network operations are characterized by multiple instances with high dimensions and fluctuating time-series features, and they depend on system resource deployment and variations in the business environment. Hence, there is a growing consensus that conducting anomaly detection with machine intelligence under the operation and maintenance personnel's guidance is more effective than relying solely on learning and modeling. This paper models the anomaly detection task as a Markov Decision Process and adopts the Double Deep Q-Network algorithm to train an anomaly detection agent, in which a multidimensional temporal convolutional network is applied as the principal structure of the Q network and interactive guidance information from the operation and maintenance personnel is introduced into the procedure to facilitate model convergence. Experimental results on the SMD dataset indicate that the proposed modeling and detection method achieves higher precision and recall rates compared to other learning-based methods. Our method optimizes the model through continuous human-computer interaction, which yields a faster and more consistent model training procedure and convergence.}, } @article {pmid37765801, year = {2023}, author = {Canonico, M and Desimoni, F and Ferrero, A and Grassi, PA and Irwin, C and Campani, D and Dal Molin, A and Panella, M and Magistrelli, L}, title = {Gait Monitoring and Analysis: A Mathematical Approach.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {18}, pages = {}, pmid = {37765801}, issn = {1424-8220}, abstract = {Gait abnormalities are common in the elderly and in individuals diagnosed with Parkinson's disease, often leading to reduced mobility and increased fall risk. Monitoring and assessing gait patterns in these populations play a crucial role in understanding disease progression, early detection of motor impairments, and developing personalized rehabilitation strategies. In particular, by identifying gait irregularities at an early stage, healthcare professionals can implement timely interventions and personalized therapeutic approaches, potentially delaying the onset of severe motor symptoms and improving overall patient outcomes. In this paper, we studied older adults affected by chronic diseases and/or Parkinson's disease by monitoring their gait with wearable devices that can accurately detect a person's movements. About 50 people were involved in the trial (20 with Parkinson's disease and 30 with chronic diseases), each of whom wore our device for at least 6 months. During the experiment, each device collected 25 accelerometer samples per second.
By analyzing those data, we propose a "gait quality" metric based on the entropy of the signal's Fourier spectrum.}, } @article {pmid37765790, year = {2023}, author = {Wu, YL and Wang, CS and Weng, WC and Lin, YC}, title = {Development of a Cloud-Based Image Processing Health Checkup System for Multi-Item Urine Analysis.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {18}, pages = {}, pmid = {37765790}, issn = {1424-8220}, abstract = {With the busy pace of modern life, an increasing number of people are afflicted by lifestyle diseases. Going directly to the hospital for medical checks is not only time-consuming but also costly. Fortunately, the emergence of rapid tests has alleviated this burden. Accurately interpreting test results is extremely important; misinterpreting the results of rapid tests could lead to delayed medical treatment. Given that the URS-10 is a rapid test capable of detecting 10 distinct parameters in urine samples, assessing these parameters can offer insights into the subject's physiological condition. These parameters encompass aspects such as metabolism, renal function, diabetes, urinary tract disorders, hemolytic diseases, and acid-base balance, among others. Although the operational procedure is straightforward, the variegated color changes exhibited in the outcomes of individual parameters make it challenging for lay users to deduce causal factors solely from color variations. Moreover, potential misinterpretations could arise due to visual discrepancies. In this study, we successfully developed a cloud-based health checkup system that can be used in an indoor environment. The system is used by placing a URS-10 test strip on a colorimetric board developed for this study and then using a smartphone application to take images, which are uploaded to a server for cloud computing. Finally, the interpretation results are stored in the cloud and sent back to the smartphone to be checked by the user. Furthermore, to confirm whether the color-calibration technology could eliminate color differences between cameras, and whether the colorimetric board and the urine test strips could perform color comparisons correctly under different light intensities, indoor environments simulating specific light intensities were established for testing purposes. When comparing the experimental results to real test strips, only two groups failed to reach an identification success rate of 100%, and in both of these cases the success rate reached 95%. The experimental results confirmed that the system developed in this study was able to eliminate color differences between camera devices and could be used without special technical requirements or training.}, } @article {pmid37757612, year = {2023}, author = {Palmer, GA and Tomkin, G and Martín-Alcalá, HE and Mendizabal-Ruiz, G and Cohen, J}, title = {The Internet of Things in assisted reproduction.}, journal = {Reproductive biomedicine online}, volume = {47}, number = {5}, pages = {103338}, doi = {10.1016/j.rbmo.2023.103338}, pmid = {37757612}, issn = {1472-6491}, mesh = {Humans ; *Internet of Things ; Internet ; Automation ; Laboratories ; Reproduction ; }, abstract = {The Internet of Things (IoT) is a network connecting physical objects with sensors, software and internet connectivity for data exchange. Integrating the IoT with medical devices shows promise in healthcare, particularly in IVF laboratories.
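Returning to the gait study above: a spectral-entropy "gait quality" metric of the kind it proposes can be sketched as follows; the synthetic signals and the normalization are illustrative assumptions, not the authors' exact formula.

    # Spectral entropy of an accelerometer trace: FFT, normalize the power
    # spectrum, take Shannon entropy. Lower values = more regular gait.
    import numpy as np

    def spectral_entropy(signal):
        power = np.abs(np.fft.rfft(signal - np.mean(signal))) ** 2
        p = power / power.sum()
        p = p[p > 0]
        return float(-(p * np.log2(p)).sum() / np.log2(len(power)))

    t = np.arange(0, 10, 1 / 25)                 # 10 s at 25 samples/s, as above
    steady = np.sin(2 * np.pi * 1.8 * t)         # regular ~1.8 Hz gait
    noisy = steady + 0.8 * np.random.default_rng(0).normal(size=t.size)
    print(spectral_entropy(steady), spectral_entropy(noisy))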
By leveraging telecommunications, cybersecurity, data management and intelligent systems, the IoT can enable a data-driven laboratory with automation, improved conditions, personalized treatment and efficient workflows. The integration of 5G technology ensures fast and reliable connectivity for real-time data transmission, while blockchain technology secures patient data. Fog computing reduces latency and enables real-time analytics. Microelectromechanical systems enable wearable IoT and miniaturized monitoring devices for tracking IVF processes. However, challenges such as security risks and network issues must be addressed through cybersecurity measures and networking advancements. Clinical embryologists should maintain their expertise and knowledge for safety and oversight, even with IoT in the IVF laboratory.}, } @article {pmid37746608, year = {2023}, author = {Baghdadi, A and Guo, E and Lama, S and Singh, R and Chow, M and Sutherland, GR}, title = {Force Profile as Surgeon-Specific Signature.}, journal = {Annals of surgery open : perspectives of surgical history, education, and clinical approaches}, volume = {4}, number = {3}, pages = {e326}, pmid = {37746608}, issn = {2691-3593}, abstract = {OBJECTIVE: To investigate the notion that a surgeon's force profile can be the signature of their identity and performance.

SUMMARY BACKGROUND DATA: Surgeon performance in the operating room is an understudied topic. The advent of deep learning methods paired with a sensorized surgical device presents an opportunity to incorporate quantitative insight into surgical performance and processes. Using a device called the SmartForceps System and through automated analytics, we have previously reported surgeon force profile, surgical skill, and task classification. However, whether an individual surgeon can be identified by their surgical technique has yet to be studied.

METHODS: In this study, we investigate multiple neural network architectures to identify the surgeon associated with their time-series tool-tissue forces using bipolar forceps data. The surgeon associated with each 10-second window of force data was labeled, and the data were randomly split into 80% for model training and validation (10% validation) and 20% for testing. Data imbalance was mitigated through subsampling from more populated classes with a random size adjustment based on 0.1% of sample counts in the respective class. An exploratory analysis of force segments was performed to investigate underlying patterns differentiating individual surgical techniques.
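A minimal sketch of the 10-second windowing and 80/10/10 split described in this paragraph; the sampling rate and trace length are assumed for illustration, since they are not given here.

    # Segment a force trace into fixed 10-second windows, then split
    # window indices 80/10/10. SAMPLE_RATE_HZ is an assumption.
    import numpy as np

    SAMPLE_RATE_HZ = 100
    WINDOW = 10 * SAMPLE_RATE_HZ          # samples per 10-second segment

    def segment(force_trace, surgeon_id):
        n = len(force_trace) // WINDOW
        windows = force_trace[: n * WINDOW].reshape(n, WINDOW)
        return windows, np.full(n, surgeon_id)

    rng = np.random.default_rng(0)
    X, y = segment(rng.normal(size=35_000), surgeon_id=3)  # one case, surgeon 3
    idx = rng.permutation(len(X))
    n_train, n_val = int(0.8 * len(X)), int(0.1 * len(X))
    train, val, test = np.split(idx, [n_train, n_train + n_val])
    print(len(train), len(val), len(test))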

RESULTS: In a dataset of 2819 ten-second time segments from 89 neurosurgical cases, the best-performing model achieved a micro-average area under the curve of 0.97, a testing F1-score of 0.82, a sensitivity of 82%, and a precision of 82%. This model used a time-series ResNet to extract features from the time-series data, followed by a linearized output fed into the XGBoost algorithm. Furthermore, we found that convolutional neural networks outperformed long short-term memory networks in performance and speed. Using a weighted average approach, an ensemble model was able to identify an expert surgeon with 83.8% accuracy on a validation dataset.

CONCLUSIONS: Our results demonstrate that each surgeon has a unique force profile amenable to identification using deep learning methods. We anticipate our models will enable a quantitative framework to provide bespoke feedback to surgeons and to track their skill progression longitudinally. Furthermore, the ability to recognize individual surgeons introduces a mechanism for correlating outcomes with surgeon performance.}, } @article {pmid37745890, year = {2023}, author = {Habib, W and Connolly, J}, title = {A national-scale assessment of land use change in peatlands between 1989 and 2020 using Landsat data and Google Earth Engine-a case study of Ireland.}, journal = {Regional environmental change}, volume = {23}, number = {4}, pages = {124}, pmid = {37745890}, issn = {1436-3798}, abstract = {Over the centuries, anthropogenic pressure has severely impacted peatlands on the European continent. Peatlands cover ~ 21% (1.46 Mha) of Ireland's land surface, but 85% have been degraded by management activities (land use). Ireland needs to meet its 2030 climate energy framework targets related to greenhouse gas (GHG) emissions from land use, land-use change and forestry, including wetlands. Despite Ireland's voluntary decision to include peatlands in this system in 2020, information on land use activities and associated GHG emissions from peatlands is lacking. This study strives to fill this information gap by using Landsat (5, 8) data with Google Earth Engine and machine learning to examine and quantify land use on Irish peatlands across three time periods: 1990, 2005 and 2019. Four peatland land use classes were mapped and assessed: industrial peat extraction, forestry, grassland and residual peatland. The overall accuracy of the classification was 86% and 85% for the 2005 and 2019 maps, respectively. The accuracy of the 1990 dataset could not be assessed due to the unavailability of high-resolution reference data. The results indicate that extensive management activities have taken place in peatlands over the past three decades, which may have negative impacts on their ecological integrity and the many ecosystem services they provide. By utilising cloud computing, temporal mosaicking and Landsat data, this study developed a robust methodology that overcomes cloud contamination and produces the first peatland land use maps of Ireland with wall-to-wall coverage.
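A compressed sketch of this kind of Earth Engine classification workflow in the Python API; the collection, bands, point, training asset, and tree count are placeholders, not the study's actual inputs.

    # Landsat composite + random-forest land-use classification in the
    # Earth Engine Python API; asset names below are hypothetical.
    import ee
    ee.Initialize()

    composite = (
        ee.ImageCollection("LANDSAT/LC08/C02/T1_L2")
        .filterDate("2019-01-01", "2019-12-31")
        .filterBounds(ee.Geometry.Point([-8.0, 53.4]))   # Ireland, roughly
        .median()
        .select(["SR_B2", "SR_B3", "SR_B4", "SR_B5"])
    )
    training = composite.sampleRegions(
        collection=ee.FeatureCollection("users/example/peatland_training"),
        properties=["landuse"],
        scale=30,
    )
    classifier = ee.Classifier.smileRandomForest(100).train(
        features=training, classProperty="landuse",
        inputProperties=composite.bandNames(),
    )
    classified = composite.classify(classifier)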
This has the potential for regional and global applications, providing maps that could help in understanding unsustainable management practices on peatlands and their impact on GHG emissions.}, } @article {pmid37745873, year = {2023}, author = {Intelligence And Neuroscience, C}, title = {Retracted: The Reform of University Education Teaching Based on Cloud Computing and Big Data Background.}, journal = {Computational intelligence and neuroscience}, volume = {2023}, number = {}, pages = {9893153}, pmid = {37745873}, issn = {1687-5273}, abstract = {[This retracts the article DOI: 10.1155/2022/8169938.].}, } @article {pmid37744210, year = {2023}, author = {Verner, E and Petropoulos, H and Baker, B and Bockholt, HJ and Fries, J and Bohsali, A and Raja, R and Trinh, DH and Calhoun, V}, title = {BrainForge: an online data analysis platform for integrative neuroimaging acquisition, analysis, and sharing.}, journal = {Concurrency and computation : practice & experience}, volume = {35}, number = {18}, pages = {}, pmid = {37744210}, issn = {1532-0626}, support = {R01 MH118695/MH/NIMH NIH HHS/United States ; R01 MH123610/MH/NIMH NIH HHS/United States ; R41 MH122201/MH/NIMH NIH HHS/United States ; R41 MH100070/MH/NIMH NIH HHS/United States ; R01 EB020407/EB/NIBIB NIH HHS/United States ; }, abstract = {BrainForge is a cloud-enabled, web-based analysis platform for neuroimaging research. This website allows users to archive data from a study and effortlessly process data on a high-performance computing cluster. After analyses are completed, results can be quickly shared with colleagues. BrainForge solves multiple problems for researchers who want to analyze neuroimaging data, including issues related to software, reproducibility, computational resources, and data sharing. BrainForge can currently process structural, functional, diffusion, and arterial spin labeling MRI modalities, including preprocessing and group-level analyses. Additional pipelines are currently being added, and the pipelines can accept the BIDS format. Analyses are conducted completely inside Singularity containers and utilize popular software packages including Nipype, Statistical Parametric Mapping, the Group ICA of fMRI Toolbox, and FreeSurfer. BrainForge also features several interfaces for group analysis, including a fully automated adaptive ICA approach.}, } @article {pmid37738400, year = {2023}, author = {Lim, HG and Fann, YC and Lee, YG}, title = {COWID: an efficient cloud-based genomics workflow for scalable identification of SARS-COV-2.}, journal = {Briefings in bioinformatics}, volume = {24}, number = {5}, pages = {}, pmid = {37738400}, issn = {1477-4054}, support = {75N91019D00024/CA/NCI NIH HHS/United States ; HHSN261201500003I/CA/NCI NIH HHS/United States ; ZIC NS009443/ImNIH/Intramural NIH HHS/United States ; HHSN261201400008C/NH/NIH HHS/United States ; }, mesh = {Humans ; *COVID-19/diagnosis ; Cloud Computing ; SARS-CoV-2/genetics ; Workflow ; Genomics ; }, abstract = {Implementing a specific cloud resource to analyze extensive genomic data on severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2) poses a challenge when resources are limited. To overcome this, we repurposed a cloud platform initially designed for use in research on cancer genomics (https://cgc.sbgenomics.com) to enable its use in research on SARS-CoV-2 and to build the Cloud Workflow for Viral and Variant Identification (COWID).
COWID is a workflow based on the Common Workflow Language that realizes the full potential of sequencing technology for reliable SARS-CoV-2 identification and leverages cloud computing to achieve efficient parallelization. COWID outperformed other contemporary methods, offering scalable identification and reliable variant findings with no false-positive results. COWID typically processed each sample of raw sequencing data within 5 min at a cost of only US$0.01. The COWID source code is publicly available (https://github.com/hendrick0403/COWID) and can be accessed on any computer with Internet access. COWID is designed to be user-friendly; it can be used without prior programming knowledge. Therefore, COWID is a time-efficient tool that can be used during a pandemic.}, } @article {pmid37732291, year = {2023}, author = {Pessin, VZ and Santos, CAS and Yamane, LH and Siman, RR and Baldam, RL and Júnior, VL}, title = {A method of Mapping Process for scientific production using the Smart Bibliometrics.}, journal = {MethodsX}, volume = {11}, number = {}, pages = {102367}, pmid = {37732291}, issn = {2215-0161}, abstract = {Big data has launched a modern way of producing science and research around the world. Owing to the explosion of data available in scientific databases, combined with recent advances in information technology, researchers have at their disposal new methods and technologies that facilitate scientific development. Considering the challenges of producing science in a dynamic and complex scenario, the main objective of this article is to present a method, aligned with recently developed tools, to support scientific production, based on steps and technologies that help researchers materialize their objectives efficiently and effectively. Applying this method, researchers can use science mapping and bibliometric techniques with agility, taking advantage of an easy-to-use solution with cloud computing capabilities. By applying the "Scientific Mapping Process", researchers can generate strategic information for results-oriented scientific production, moving assertively through the main steps of research and boosting scientific discovery in the most diverse fields of investigation. •The Scientific Mapping Process provides a method and a system to boost scientific development.•It automates Science Mapping and bibliometric analysis from scientific datasets.•It facilitates the researcher's work, increasing the assertiveness in scientific production.}, } @article {pmid37729405, year = {2023}, author = {Willett, DS and Brannock, J and Dissen, J and Keown, P and Szura, K and Brown, OB and Simonson, A}, title = {NOAA Open Data Dissemination: Petabyte-scale Earth system data in the cloud.}, journal = {Science advances}, volume = {9}, number = {38}, pages = {eadh0032}, pmid = {37729405}, issn = {2375-2548}, abstract = {NOAA Open Data Dissemination (NODD) makes NOAA environmental data publicly and freely available on Amazon Web Services (AWS), Microsoft Azure (Azure), and Google Cloud Platform (GCP). These data can be accessed by anyone with an internet connection and span key datasets across the Earth system, including satellite imagery, radar, weather models and observations, ocean databases, and climate data records. Since its inception, NODD has grown to provide public access to more than 24 PB of NOAA data and can support billions of requests and petabytes of access daily.
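NODD buckets are publicly readable, so a minimal anonymous-access sketch with boto3 looks like the following; the bucket and prefix are examples of public NOAA holdings on AWS, and exact layouts vary by dataset.

    # Anonymous (unsigned) S3 listing of a public NOAA bucket.
    import boto3
    from botocore import UNSIGNED
    from botocore.config import Config

    s3 = boto3.client("s3", config=Config(signature_version=UNSIGNED))
    resp = s3.list_objects_v2(Bucket="noaa-goes16",
                              Prefix="ABI-L2-CMIPF/2023/", MaxKeys=5)
    for obj in resp.get("Contents", []):
        print(obj["Key"], obj["Size"])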
Stakeholders routinely access more than 5 PB of NODD data every month. NODD continues to grow to support open petabyte-scale Earth system data science in the cloud by onboarding additional NOAA data and exploring performant data formats. Here, we document how this program works, with a focus on provenance, key datasets, and use. We also highlight how to access these data, with the goal of accelerating the use of NOAA resources in the cloud.}, } @article {pmid37718323, year = {2023}, author = {Namazi, F and Ezoji, M and Parmehr, EG}, title = {Paddy Rice mapping in fragmented lands by improved phenology curve and correlation measurements on Sentinel-2 imagery in Google earth engine.}, journal = {Environmental monitoring and assessment}, volume = {195}, number = {10}, pages = {1220}, doi = {10.1007/s10661-023-11808-3}, pmid = {37718323}, issn = {1573-2959}, mesh = {*Oryza ; Search Engine ; Environmental Monitoring ; Algorithms ; Water ; }, abstract = {Accurate and timely rice crop mapping is important to address the challenges of food security, water management, disease transmission, and land use change. However, accurate rice crop mapping is difficult due to the presence of mixed pixels in small and fragmented rice fields as well as cloud cover. In this paper, a phenology-based method using Sentinel-2 time series images is presented to solve these problems. First, an improved rice phenology curve is extracted from Normalized Difference Vegetation Index and Land Surface Water Index time series data of rice fields. Then, the correlation between the rice phenology curve and the time series data of each pixel is computed. The correlation result for each pixel shows the similarity of its time-series behavior to the proposed rice phenology curve. In the next step, the maximum correlation value and its time of occurrence are used as each pixel's feature vector for classification. Since the correlation measurement provides data with better separability than its input data, the classifier can be trained with fewer samples and the classification is more accurate. The proposed correlation-based algorithm can be implemented in parallel. All the processes were performed on the Google Earth Engine cloud platform on Sentinel-2 time series images. The implementations show the high accuracy of this method.}, } @article {pmid37705635, year = {2023}, author = {Yang, J and Han, J and Wan, Q and Xing, S and Chen, F}, title = {A novel similarity measurement for triangular cloud models based on dual consideration of shape and distance.}, journal = {PeerJ. Computer science}, volume = {9}, number = {}, pages = {e1506}, pmid = {37705635}, issn = {2376-5992}, abstract = {It is important to be able to measure the similarity between two uncertain concepts for many real-life AI applications, such as image retrieval, collaborative filtering, risk assessment, and data clustering. Cloud models are important cognitive computing models that show promise in measuring the similarity of uncertain concepts. Here, we aim to address the shortcomings of existing cloud model similarity measurement algorithms, such as poor discrimination ability and unstable measurement results. We propose the EPTCM algorithm, based on the triangular fuzzy number EW-type closeness and cloud drop variance, considering the shape and distance similarities of existing cloud models.
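Returning to the paddy-rice study above: its per-pixel correlation features can be sketched as follows; the reference curve, lag search, and synthetic pixel are illustrative assumptions, not the study's data.

    # Peak correlation (and its timing) between one pixel's NDVI series
    # and a reference rice phenology curve; synthetic stand-ins throughout.
    import numpy as np

    def phenology_features(pixel_series, reference, max_lag=6):
        """Return (best correlation, lag at which it occurs) for one pixel."""
        best_r, best_lag = -1.0, 0
        for lag in range(max_lag + 1):
            r = np.corrcoef(pixel_series, np.roll(reference, lag))[0, 1]
            if r > best_r:
                best_r, best_lag = r, lag
        return best_r, best_lag

    t = np.linspace(0, 1, 24)                     # 24 composites in a season
    reference = np.exp(-((t - 0.5) ** 2) / 0.02)  # idealized rice NDVI peak
    rice_pixel = np.roll(reference, 2) + 0.05 * np.random.default_rng(0).normal(size=24)
    print(phenology_features(rice_pixel, reference))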
The experimental results show that the EPTCM algorithm has good recognition and classification accuracy and is more accurate than the existing Likeness comparing method (LICM), overlap-based expectation curve (OECM), fuzzy distance-based similarity (FDCM) and multidimensional similarity cloud model (MSCM) methods. The experimental results also demonstrate that the EPTCM algorithm has successfully overcome the shortcomings of existing algorithms. In summary, the EPTCM method proposed here is effective and feasible to implement.}, } @article {pmid37702950, year = {2024}, author = {Pribec, I and Hachinger, S and Hayek, M and Pringle, GJ and Brüchle, H and Jamitzky, F and Mathias, G}, title = {Efficient and Reliable Data Management for Biomedical Applications.}, journal = {Methods in molecular biology (Clifton, N.J.)}, volume = {2716}, number = {}, pages = {383-403}, pmid = {37702950}, issn = {1940-6029}, mesh = {*Data Management ; *Big Data ; Cloud Computing ; Documentation ; Movement ; }, abstract = {This chapter discusses the challenges and requirements of modern Research Data Management (RDM), particularly for biomedical applications in the context of high-performance computing (HPC). The FAIR data principles (Findable, Accessible, Interoperable, Reusable) are of special importance. Data formats, publication platforms, annotation schemata, automated data management and staging, the data infrastructure in HPC centers, file transfer and staging methods in HPC, and the EUDAT components are discussed. Tools and approaches for automated data movement and replication in cross-center workflows are explained, as well as the development of ontologies for structuring and quality-checking of metadata in computational biomedicine. The CompBioMed project is used as a real-world example of implementing these principles and tools in practice. The LEXIS project has built a workflow-execution and data management platform that follows the paradigm of HPC-Cloud convergence for demanding Big Data applications. It is used for orchestrating workflows with YORC, utilizing the data documentation initiative (DDI) and distributed computing resources (DCI). The platform is accessed by a user-friendly LEXIS portal for workflow and data management, making HPC and Cloud Computing significantly more accessible. Checkpointing, duplicate runs, and spare images of the data are used to create resilient workflows. The CompBioMed project is completing the implementation of such a workflow, using data replication and brokering, which will enable urgent computing on exascale platforms.}, } @article {pmid37702940, year = {2024}, author = {Bonde, B}, title = {Edge, Fog, and Cloud Against Disease: The Potential of High-Performance Cloud Computing for Pharma Drug Discovery.}, journal = {Methods in molecular biology (Clifton, N.J.)}, volume = {2716}, number = {}, pages = {181-202}, pmid = {37702940}, issn = {1940-6029}, mesh = {Algorithms ; *Artificial Intelligence ; *Cloud Computing ; Drug Discovery ; Software ; }, abstract = {The high-performance computing (HPC) platform for large-scale drug discovery simulation demands significant investment in speciality hardware, maintenance, resource management, and running costs. The rapid growth in computing hardware has made it possible to provide cost-effective, robust, secure, and scalable alternatives to the on-premise (on-prem) HPC via Cloud, Fog, and Edge computing. 
It has enabled recent state-of-the-art machine learning (ML) and artificial intelligence (AI)-based tools for drug discovery, such as BERT, BARD, AlphaFold2, and GPT. This chapter overviews the types of software architectures for developing scientific software and applications with deployment-agnostic (on-prem, cloud, and hybrid) use cases. Furthermore, the chapter outlines how this innovation is disrupting the orthodox mindset of monolithic software running on on-prem HPC, and it maps the paradigm shift toward microservices-driven application programming interface (API)- and message passing interface (MPI)-based scientific computing across distributed, highly available infrastructure. This is coupled with agile DevOps, good coding practices, and low-code and no-code application development frameworks for cost-efficient, secure, automated, and robust scientific application life-cycle management.}, } @article {pmid37693890, year = {2023}, author = {Zhang, W and Zhang, C and Cao, L and Liang, F and Xie, W and Tao, L and Chen, C and Yang, M and Zhong, L}, title = {Application of digital-intelligence technology in the processing of Chinese materia medica.}, journal = {Frontiers in pharmacology}, volume = {14}, number = {}, pages = {1208055}, pmid = {37693890}, issn = {1663-9812}, abstract = {Processing of Chinese Materia Medica (PCMM) is the concentrated embodiment and core of China's unique traditional pharmaceutical technology. Processing includes preparation steps such as cleansing, cutting and stir-frying, which affect the quality and efficacy of Chinese botanical drugs. The rapid development of new digital technologies, such as big data analysis, the Internet of Things (IoT), blockchain, cloud computing and artificial intelligence, has promoted the digitalization and intellectualization of the traditional pharmaceutical manufacturing industry. In this review, the application of digital-intelligence technology in PCMM is analyzed and discussed, with the aim of promoting the standardization of processing and securing the quality of botanical drug decoction pieces. Through the intellectualization and digitization of production, the safety and effectiveness of the clinical use of traditional Chinese medicine (TCM) decoction pieces can be ensured. This review also provides a theoretical basis for further technical upgrading and high-quality development of the TCM industry.}, } @article {pmid37693367, year = {2023}, author = {Griffin, AC and Khairat, S and Bailey, SC and Chung, AE}, title = {A chatbot for hypertension self-management support: user-centered design, development, and usability testing.}, journal = {JAMIA open}, volume = {6}, number = {3}, pages = {ooad073}, pmid = {37693367}, issn = {2574-2531}, support = {P30 DK092949/DK/NIDDK NIH HHS/United States ; UM1 TR004406/TR/NCATS NIH HHS/United States ; }, abstract = {OBJECTIVES: Health-related chatbots have demonstrated early promise for improving self-management behaviors but have seldom been utilized for hypertension. This research focused on the design, development, and usability evaluation of a chatbot for hypertension self-management, called "Medicagent."

MATERIALS AND METHODS: A user-centered design process was used to iteratively design and develop a text-based chatbot using Google Cloud's Dialogflow natural language understanding platform. Then, usability testing sessions were conducted among patients with hypertension. Each session comprised: (1) background questionnaires, (2) 10 representative tasks within Medicagent, (3) the System Usability Scale (SUS) questionnaire, and (4) a brief semi-structured interview. Sessions were video and audio recorded using Zoom. Qualitative and quantitative analyses were used to assess the effectiveness, efficiency, and satisfaction of the chatbot.
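For context, routing a user utterance through Dialogflow follows the pattern of the public Python client library, sketched below; the project ID, session ID, and utterance are placeholders, and nothing here is Medicagent's actual agent configuration.

    # Detect the intent behind one text utterance with the Dialogflow client.
    from google.cloud import dialogflow

    def detect_intent(project_id, session_id, text):
        client = dialogflow.SessionsClient()
        session = client.session_path(project_id, session_id)
        query_input = dialogflow.QueryInput(
            text=dialogflow.TextInput(text=text, language_code="en-US")
        )
        response = client.detect_intent(
            request={"session": session, "query_input": query_input}
        )
        # Matched intent name plus the agent's canned reply.
        return (response.query_result.intent.display_name,
                response.query_result.fulfillment_text)

    print(detect_intent("my-project", "session-123", "I missed my dose"))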

RESULTS: Participants (n = 10) completed nearly all tasks (98%, 98/100) and spent an average of 18 min (SD = 10 min) interacting with Medicagent. Only 11 (8.6%) utterances were not successfully mapped to an intent. Medicagent achieved a mean SUS score of 78.8/100, which demonstrated acceptable usability. Several participants had difficulties navigating the conversational interface without menu and back buttons, felt additional information would be useful for redirection when utterances were not recognized, and desired a health professional persona within the chatbot.
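The SUS figure reported above comes from the scale's standard scoring rule, which is easy to sketch; the responses below are fabricated.

    # Standard SUS scoring: odd items contribute (score - 1), even items
    # (5 - score); the sum is scaled by 2.5 onto 0-100.
    def sus_score(responses):
        """responses: ten 1-5 Likert answers, item 1 first."""
        odd = sum(responses[i] - 1 for i in range(0, 10, 2))
        even = sum(5 - responses[i] for i in range(1, 10, 2))
        return 2.5 * (odd + even)

    print(sus_score([4, 2, 5, 1, 4, 2, 5, 2, 4, 2]))  # -> 82.5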

DISCUSSION: The text-based chatbot was viewed favorably for assisting with blood pressure and medication-related tasks and had good usability.

CONCLUSION: Flexibility of interaction styles, handling unrecognized utterances gracefully, and having a credible persona were highlighted as design components that may further enrich the user experience of chatbots for hypertension self-management.}, } @article {pmid37692531, year = {2023}, author = {Angelidis, E}, title = {A perspective on large-scale simulation as an enabler for novel biorobotics applications.}, journal = {Frontiers in robotics and AI}, volume = {10}, number = {}, pages = {1102286}, pmid = {37692531}, issn = {2296-9144}, abstract = {Our understanding of the complex mechanisms that power biological intelligence has been greatly enhanced through the explosive growth of large-scale neuroscience and robotics simulation tools that are used by the research community to perform previously infeasible experiments, such as the simulation of the neocortex's circuitry. Nevertheless, simulation falls far short of being directly applicable to biorobots due to the large discrepancy between the simulated and the real world. A possible solution to this problem is the further enhancement of existing simulation tools for robotics, AI and neuroscience with multi-physics capabilities. Previously infeasible or difficult-to-simulate scenarios, such as robots swimming on the water surface, interacting with soft materials, or walking on granular materials, would be rendered possible within a multi-physics simulation environment designed for robotics. In combination with multi-physics simulation, large-scale simulation tools that integrate multiple simulation modules in a closed-loop manner help address fundamental questions around the organization of neural circuits and the interplay between the brain, body and environment. We analyze existing designs for large-scale simulation running on cloud and HPC infrastructure as well as their shortcomings. Based on this analysis, we propose a next-gen modular architecture design based on multi-physics engines that we believe would greatly benefit biorobotics and AI.}, } @article {pmid37688118, year = {2023}, author = {Urblik, L and Kajati, E and Papcun, P and Zolotova, I}, title = {A Modular Framework for Data Processing at the Edge: Design and Implementation.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {17}, pages = {}, pmid = {37688118}, issn = {1424-8220}, support = {APVV-20-0247//Slovak Research and Development Agency/ ; }, abstract = {There is a rapid increase in the number of edge devices in IoT solutions, generating vast amounts of data that need to be processed and analyzed efficiently. Traditional cloud-based architectures can face latency, bandwidth, and privacy challenges when dealing with this data flood. There is currently no unified approach to the creation of edge computing solutions. This work addresses this problem by exploring containerization for data processing solutions at the network's edge. One current approach involves creating a specialized application compatible with the device used; another involves using containerization for deployment and monitoring. The heterogeneity of edge environments would greatly benefit from a universal modular platform. Our proposed edge computing-based framework implements a streaming extract, transform, and load pipeline for data processing and analysis, using ZeroMQ as the communication backbone and containerization for scalable deployment, as in the sketch below.
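One transform stage of such a ZeroMQ-backed streaming ETL pipeline can be sketched as follows; the endpoints and the unit-conversion transform are illustrative assumptions, not the framework's actual components.

    # One ETL stage: PULL raw readings, transform them, PUSH downstream.
    import json
    import zmq

    ctx = zmq.Context()
    source = ctx.socket(zmq.PULL)
    source.connect("tcp://localhost:5557")   # upstream extract stage
    sink = ctx.socket(zmq.PUSH)
    sink.connect("tcp://localhost:5558")     # downstream load stage

    while True:  # runs until the container is stopped
        reading = json.loads(source.recv())
        reading["celsius"] = (reading["fahrenheit"] - 32) / 1.8  # transform
        sink.send_string(json.dumps(reading))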
Results demonstrate the effectiveness of the proposed framework, making it suitable for time-sensitive IoT applications.}, } @article {pmid37688051, year = {2023}, author = {Shi, W and Chen, L and Zhu, X}, title = {Task Offloading Decision-Making Algorithm for Vehicular Edge Computing: A Deep-Reinforcement-Learning-Based Approach.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {17}, pages = {}, pmid = {37688051}, issn = {1424-8220}, support = {2022YFB3305500//National Key Research and Development Program of China/ ; 62273089//National Natural Science Foundation of China/ ; 62102080//National Natural Science Foundation of China/ ; BK20210204//Natural Science Foundation of Jiangsu Province/ ; }, abstract = {Efficient task offloading decision-making is a crucial technology in vehicular edge computing, which aims to fulfill the computational performance demands of complex vehicular tasks with respect to delay and energy consumption while minimizing network resource competition and consumption. Conventional distributed task offloading decisions rely solely on the local state of the vehicle and fail to utilize the server's resources to their fullest potential. In addition, the mobility of vehicles is often neglected in these decisions. In this paper, a cloud-edge-vehicle three-tier vehicular edge computing (VEC) system is proposed, where vehicles partially offload their computing tasks to edge or cloud servers while keeping the remaining tasks local to the vehicle terminals. Under the restrictions of vehicle mobility and discrete variables, task scheduling and the task offloading proportion are jointly optimized with the objective of minimizing the total system cost. Considering the non-convexity and the high-dimensional complex state and continuous action space requirements of the optimization problem, we propose a task offloading decision-making algorithm based on the deep deterministic policy gradient (TODM_DDPG). The TODM_DDPG algorithm adopts the actor-critic framework, in which the actor network outputs floating-point numbers to represent a deterministic policy, while the critic network evaluates the action output by the actor network and adjusts its evaluation policy according to rewards from the environment so as to maximize the long-term reward. To explore algorithm performance, we conduct parameter-setting experiments to tune the core hyper-parameters and select the optimal combination of parameters. In addition, to verify algorithm performance, we also carry out a series of comparative experiments with baseline algorithms. The results demonstrate that, in terms of reducing system costs, the proposed algorithm outperforms the compared baselines, such as the deep Q network (DQN) and the actor-critic (AC), with performance improved by about 13% on average.}, } @article {pmid37687890, year = {2023}, author = {Zhou, W and Qian, Z and Ni, X and Tang, Y and Guo, H and Zhuang, S}, title = {Dense Convolutional Neural Network for Identification of Raman Spectra.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {17}, pages = {}, pmid = {37687890}, issn = {1424-8220}, support = {21010502900//Science and Technology Commission of Shanghai Municipality/ ; }, abstract = {The rapid development of cloud computing and deep learning is making intelligent applications widespread in various fields.
The identification of Raman spectra can be realized in the cloud thanks to its powerful computing, abundant spectral databases and advanced algorithms, reducing dependence on the performance of terminal instruments. However, the complexity of the detection environment can cause strong interference, which may significantly decrease the identification accuracy of algorithms. In this paper, a deep learning algorithm based on the Dense network is proposed to realize this vision. The proposed Dense convolutional neural network has a very deep structure of over 40 layers and plenty of parameters to adjust the weights of different wavebands. In the core Dense blocks of the network, each layer is connected to every other layer in a feed-forward fashion. This alleviates the vanishing- and exploding-gradient problems, strengthens feature propagation, encourages feature reuse and enhances training efficiency. The network's architecture mitigates noise interference and ensures precise identification. The Dense network shows greater accuracy and robustness than other CNN-based algorithms. We set up a database of 1600 Raman spectra consisting of 32 different types of liquid chemicals, detected in different postures as examples of interference-affected Raman spectra. Across the 50 repeated training and testing sets, the Dense network achieves a weighted accuracy of 99.99%. We have also tested the Dense network on the RRUFF database, where it performs well. The proposed approach advances cloud-enabled Raman spectra identification, offering improved accuracy and adaptability for diverse identification tasks.}, } @article {pmid37687870, year = {2023}, author = {Sangaiah, AK and Javadpour, A and Pinto, P and Chiroma, H and Gabralla, LA}, title = {Cost-Effective Resources for Computing Approximation Queries in Mobile Cloud Computing Infrastructure.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {17}, pages = {}, pmid = {37687870}, issn = {1424-8220}, abstract = {Answering a query through a peer-to-peer database presents one of the greatest challenges due to the high cost and time required to obtain a comprehensive response; consequently, these systems were primarily designed to handle approximation queries. In our research, the primary objective was to develop an intelligent system capable of responding to approximate set-value inquiries. This paper explores the use of particle optimization to enhance the system's intelligence. In contrast to previous studies, our proposed method avoids the use of sampling. Despite the utilization of the best sampling methods, there remains a possibility of error, making it difficult to guarantee accuracy. Nonetheless, achieving a certain degree of accuracy is crucial in handling approximate queries, and various factors influence the accuracy of sampling procedures. The results of our studies indicate that the suggested method has demonstrated improvements in terms of the number of queries issued, the number of peers examined, and its execution time, which is significantly faster than the flood approach. Our research evaluated several methods, including flood algorithms, parallel diffusion algorithms, and ISM algorithms.
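Returning to the Raman study above: the densely connected design it builds on can be sketched in Keras as below; the depth, growth rate, and input length are illustrative, not the paper's exact 40-plus-layer architecture.

    # DenseNet-style 1-D block: every layer's output is concatenated with
    # its inputs, giving each layer a feed-forward path to all later layers.
    import tensorflow as tf
    from tensorflow.keras import layers

    def dense_block(x, n_layers=4, growth=16):
        for _ in range(n_layers):
            y = layers.BatchNormalization()(x)
            y = layers.Activation("relu")(y)
            y = layers.Conv1D(growth, 3, padding="same")(y)
            x = layers.Concatenate()([x, y])   # connect to all later layers
        return x

    inputs = tf.keras.Input(shape=(1024, 1))       # one Raman spectrum
    x = layers.Conv1D(32, 7, padding="same")(inputs)
    x = dense_block(x)
    x = layers.GlobalAveragePooling1D()(x)
    outputs = layers.Dense(32, activation="softmax")(x)  # 32 chemical classes
    model = tf.keras.Model(inputs, outputs)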
When it comes to query transmission, the proposed method exhibits superior cost-effectiveness and execution times.}, } @article {pmid37687784, year = {2023}, author = {Alsemmeari, RA and Dahab, MY and Alturki, B and Alsulami, AA and Alsini, R}, title = {Towards an Effective Service Allocation in Fog Computing.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {17}, pages = {}, pmid = {37687784}, issn = {1424-8220}, support = {IFPIP: 1033-611-1443//Deanship of Scientific Research (DSR) at King Abdulaziz University, Jeddah/ ; }, abstract = {The Internet of Things (IoT) generates a large volume of data whenever devices are interconnected and exchange data across a network. Consequently, a variety of services with diverse needs arises, including capacity requirements, data quality, and latency demands. These services operate on fog computing devices, which are limited in power and bandwidth compared to the cloud. The primary challenge lies in determining the optimal location for service implementation: in the fog, in the cloud, or in a hybrid setup. This paper introduces an efficient allocation technique that moves processing closer to the network's fog side. It explores the optimal allocation of devices and services while maintaining resource utilization within an IoT architecture. The paper also examines the significance of allocating services to devices and optimizing resource utilization in fog computing. In IoT scenarios, where a wide range of services and devices coexist, it becomes crucial to assign services to devices effectively. We propose priority-based service allocation (PSA) and sort-based service allocation (SSA) techniques, which are employed to determine the optimal order in which devices perform different services. Experimental results demonstrate that our proposed technique reduces data communication over the network by 88%, which is achieved by allocating most services locally in the fog. We increased the distribution of services to fog devices by 96%, while simultaneously minimizing the wastage of fog resources.}, } @article {pmid37679146, year = {2023}, author = {Tian, L and Shang, F and Gan, C}, title = {Optimal control analysis of malware propagation in cloud environments.}, journal = {Mathematical biosciences and engineering : MBE}, volume = {20}, number = {8}, pages = {14502-14517}, doi = {10.3934/mbe.2023649}, pmid = {37679146}, issn = {1551-0018}, abstract = {Cloud computing has become a widespread technology that delivers a broad range of services across various industries globally. One of the crucial features of cloud infrastructure is virtual machine (VM) migration, which plays a pivotal role in resource allocation flexibility and reducing energy consumption, but it also facilitates the fast propagation of malware. To tackle the challenge of curtailing the proliferation of malware in the cloud, this paper proposes an effective strategy based on optimal dynamic immunization using a controlled dynamical model. The objective of the research is to identify the most efficient way of dynamically immunizing the cloud to minimize the spread of malware. To achieve this, we define the control strategy and the loss, and formulate the corresponding optimal control problem. The optimal control analysis of the controlled dynamical model is examined theoretically and experimentally.
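The fog service allocation entry above names priority-based service allocation (PSA) without spelling out its rules, so the following greedy sketch is purely hypothetical: services are ordered by priority and placed on the first fog device with enough remaining capacity, falling back to the cloud. The field names and the first-fit fallback are assumptions, not the paper's PSA/SSA definitions.

```python
# Hypothetical greedy sketch of priority-based service allocation (PSA).
# Field names and the placement rule are assumptions for illustration.
from dataclasses import dataclass, field

@dataclass
class Service:
    name: str
    priority: int      # higher = more urgent
    demand: float      # abstract capacity units

@dataclass
class FogDevice:
    name: str
    capacity: float
    assigned: list = field(default_factory=list)

def allocate(services: list[Service], devices: list[FogDevice]) -> dict[str, str]:
    placement = {}
    for svc in sorted(services, key=lambda s: s.priority, reverse=True):
        target = next((d for d in devices if d.capacity >= svc.demand), None)
        if target is not None:
            target.capacity -= svc.demand
            target.assigned.append(svc.name)
            placement[svc.name] = target.name
        else:
            placement[svc.name] = "cloud"   # no fog device can host it
    return placement

services = [Service("video-analytics", 3, 4.0),
            Service("telemetry", 1, 0.5),
            Service("alerting", 2, 1.0)]
devices = [FogDevice("fog-1", 2.0), FogDevice("fog-2", 5.0)]
print(allocate(services, devices))
# {'video-analytics': 'fog-2', 'alerting': 'fog-1', 'telemetry': 'fog-1'}
```

Keeping high-priority services in the fog is what drives the reported reduction in network traffic; anything that cannot fit locally is the residual cloud load.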
Finally, the theoretical and experimental results both demonstrate that the optimal strategy can minimize the incidence of infections at a reasonable loss.}, } @article {pmid37676890, year = {2023}, author = {Niu, S and Dong, R and Fang, L}, title = {Certificateless broadcast signcryption scheme supporting equality test in smart grid.}, journal = {PloS one}, volume = {18}, number = {9}, pages = {e0290666}, pmid = {37676890}, issn = {1932-6203}, mesh = {*Algorithms ; *Cloud Computing ; Internet ; Privacy ; Trust ; }, abstract = {With the development of cloud computing and the application of the Internet of Things (IoT) in the smart grid, a massive amount of sensitive data is produced by terminal equipment. This vast amount of data is subject to various attacks during transmission, from which users must be protected. However, most of the existing schemes require a large amount of network bandwidth and cannot ensure the receiver's anonymity. To address these shortcomings, we construct a broadcast signcryption scheme supporting an equality test based on a certificateless cryptosystem. The scheme employs a symmetric encryption algorithm to improve encryption and transmission efficiency; the Lagrange interpolation theorem is used to encrypt the user's identity to ensure the privacy preservation of terminal devices; and a trusted third party is used to eliminate duplicated ciphertexts for identical messages using an equality test, resulting in efficient network bandwidth utilization. Experimental analysis shows that our work has greater advantages in the field of practical broadcast services.}, } @article {pmid37672552, year = {2023}, author = {, }, title = {Retraction: Construction and optimization of inventory management system via cloud-edge collaborative computing in supply chain environment in the Internet of Things era.}, journal = {PloS one}, volume = {18}, number = {9}, pages = {e0291318}, pmid = {37672552}, issn = {1932-6203}, } @article {pmid37669969, year = {2023}, author = {Zhao, Y and Ye, H}, title = {Power system low delay resource scheduling model based on edge computing node.}, journal = {Scientific reports}, volume = {13}, number = {1}, pages = {14634}, pmid = {37669969}, issn = {2045-2322}, abstract = {As more and more intelligent devices are deployed in power systems, the number of connected nodes in the power network is increasing exponentially. Against the background of smart grid cooperation across power areas and voltage levels, how to effectively process the massive data generated by the smart grid has become a difficult problem for ensuring the stable operation of the power system. In the complex calculation processes of power systems, existing approaches cannot minimize the operation time of complex calculations or improve execution efficiency. Therefore, this paper proposes a two-phase heuristic algorithm based on edge computing. In solving the virtual machine sequencing problem, the critical path algorithm is used to sort the virtual machines of the main partition and the coordination partition to minimize computing time. For the other sub-partitions, the minimum cut algorithm is used to reduce the traffic interaction of each sub-partition. In the second stage, the virtual machine placement process uses an improved best-fit algorithm to avoid poor placement of virtual machines across physical machine configurations, which would otherwise increase computing time.
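The certificateless signcryption entry above hides receiver identities using the Lagrange interpolation theorem. The sketch below shows only that mathematical building block, generic Lagrange interpolation over a prime field; the field choice and the usage are assumptions, not the paper's actual scheme.

```python
# Generic Lagrange interpolation over a prime field, the building block
# the signcryption abstract mentions for hiding receiver identities.
# Textbook sketch; the paper's concrete construction is not reproduced.
P = 2**61 - 1  # a Mersenne prime; the field choice is an assumption

def lagrange_eval(points: list[tuple[int, int]], x: int, p: int = P) -> int:
    """Evaluate at x the unique polynomial through the given points mod p."""
    total = 0
    for i, (xi, yi) in enumerate(points):
        num, den = 1, 1
        for j, (xj, _) in enumerate(points):
            if i != j:
                num = num * ((x - xj) % p) % p
                den = den * ((xi - xj) % p) % p
        # Modular inverse via Fermat's little theorem (p is prime).
        total = (total + yi * num * pow(den, p - 2, p)) % p
    return total

# The polynomial through (1,1), (2,4), (3,9) is x^2; check it at x = 5.
print(lagrange_eval([(1, 1), (2, 4), (3, 9)], 5))  # 25
```

In identity-hiding constructions of this family, authorized identities typically sit on such a polynomial, so membership can be checked without broadcasting the identity list itself.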
Through experiments on the test system, it is shown that calculation efficiency improves when the coordinated partition's calculation belongs to the target partition. Because edge computing is closer to the data source, it saves more data transmission time than cloud computing. This paper provides an effective algorithm for distributed power system computing with virtual machine configuration in edge computing, which can effectively reduce the computing time of the power system and improve the efficiency of system resource utilization.}, } @article {pmid37662679, year = {2023}, author = {Healthcare Engineering, JO}, title = {Retracted: Construction and Clinical Application Effect of General Surgery Patient-Oriented Nursing Information Platform Using Cloud Computing.}, journal = {Journal of healthcare engineering}, volume = {2023}, number = {}, pages = {9784736}, pmid = {37662679}, issn = {2040-2309}, abstract = {[This retracts the article DOI: 10.1155/2022/8273701.].}, } @article {pmid37649809, year = {2023}, author = {Zainudin, H and Koufos, K and Lee, G and Jiang, L and Dianati, M}, title = {Impact analysis of cooperative perception on the performance of automated driving in unsignalized roundabouts.}, journal = {Frontiers in robotics and AI}, volume = {10}, number = {}, pages = {1164950}, pmid = {37649809}, issn = {2296-9144}, abstract = {This paper reports the implementation and results of a simulation-based analysis of the impact of cloud/edge-enabled cooperative perception on the performance of automated driving in unsignalized roundabouts. This is achieved by comparing the performance of automated driving assisted by cooperative perception to that of a baseline system, where the automated vehicle relies only on its onboard sensing and perception for motion planning and control. The paper first provides descriptions of the implemented simulation model, which integrates the SUMO road traffic generator and the CARLA simulator, including both the baseline and cooperative perception-assisted automated driving systems. We then define a set of relevant key performance indicators for traffic efficiency, safety, and ride comfort, as well as simulation scenarios to collect relevant data for our analysis. This is followed by the description of the simulation scenarios, presentation of the results, and discussion of the insights learned from them.}, } @article {pmid37631822, year = {2023}, author = {Almudayni, Z and Soh, B and Li, A}, title = {Enhancing Energy Efficiency and Fast Decision Making for Medical Sensors in Healthcare Systems: An Overview and Novel Proposal.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {16}, pages = {}, pmid = {37631822}, issn = {1424-8220}, mesh = {*Conservation of Energy Resources ; Physical Phenomena ; *Algorithms ; Industry ; Decision Making ; }, abstract = {In the realm of the Internet of Things (IoT), a network of sensors and actuators collaborates to fulfill specific tasks. As the demand for IoT networks continues to rise, it becomes crucial to ensure the stability of this technology and adapt it for further expansion. Through an analysis of related works, including the feedback-based optimized fuzzy scheduling approach (FOFSA) algorithm, the adaptive task allocation technique (ATAT), and the osmosis load balancing algorithm (OLB), we identify their limitations in achieving optimal energy efficiency and fast decision making.
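The power system scheduling entry above closes its second phase with an improved best-fit placement of virtual machines. As a point of reference, the textbook best-fit rule it builds on can be sketched as follows; the paper's partition-aware improvements are not spelled out in the abstract and are not reproduced here.

```python
# Classic best-fit placement: each VM goes to the physical machine that
# leaves the least spare capacity after hosting it. A baseline sketch of
# the second phase described above, not the paper's "improved" variant.
def best_fit(vm_demands: list[float], host_capacities: list[float]) -> list[int]:
    """Return, for each VM, the index of its host (-1 if none fits)."""
    free = host_capacities[:]          # remaining capacity per host
    placement = []
    for demand in vm_demands:
        candidates = [(free[h] - demand, h) for h in range(len(free))
                      if free[h] >= demand]
        if not candidates:
            placement.append(-1)       # would trigger scale-out in practice
            continue
        _, host = min(candidates)      # tightest fit wins
        free[host] -= demand
        placement.append(host)
    return placement

print(best_fit([4.0, 2.5, 3.0], [8.0, 4.0]))  # [1, 0, 0]
```

Best-fit packs VMs tightly, which is exactly what the abstract's goal of avoiding placements that straddle physical machine configurations (and thus inflate computing time) requires as a starting point.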
To address these limitations, this research introduces a novel approach to improving the processing time and energy efficiency of IoT networks. The proposed approach achieves this by efficiently allocating IoT data resources in the Mist layer during the early stages. We apply the approach to our proposed system, known as the Mist-based fuzzy healthcare system (MFHS), which demonstrates promising potential to overcome the existing challenges and pave the way for the efficient industrial Internet of healthcare things (IIoHT) of the future.}, } @article {pmid37631769, year = {2023}, author = {Hamzei, M and Khandagh, S and Jafari Navimipour, N}, title = {A Quality-of-Service-Aware Service Composition Method in the Internet of Things Using a Multi-Objective Fuzzy-Based Hybrid Algorithm.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {16}, pages = {}, pmid = {37631769}, issn = {1424-8220}, abstract = {The Internet of Things (IoT) represents a cutting-edge technical domain, encompassing billions of intelligent objects capable of bridging the physical and virtual worlds across various locations. IoT services are responsible for delivering essential functionalities. In this dynamic and interconnected IoT landscape, providing high-quality services is paramount to enhancing user experiences and optimizing system efficiency. Service composition techniques come into play to address user requests in IoT applications, allowing various IoT services to collaborate seamlessly. Considering the resource limitations of IoT devices, they often leverage cloud infrastructures to overcome technological constraints, benefiting from unlimited resources and capabilities. Moreover, the emergence of fog computing has gained prominence, facilitating IoT application processing in edge networks closer to IoT sensors and effectively reducing delays inherent in cloud data centers. In this context, our study proposes a cloud-/fog-based service composition for IoT, introducing a novel fuzzy-based hybrid algorithm. This algorithm ingeniously combines Ant Colony Optimization (ACO) and Artificial Bee Colony (ABC) optimization algorithms, taking into account energy consumption and Quality of Service (QoS) factors during the service selection process. By leveraging this fuzzy-based hybrid algorithm, our approach aims to revolutionize service composition in IoT environments by empowering intelligent decision-making capabilities and ensuring optimal user satisfaction. Our experimental results demonstrate the effectiveness of the proposed strategy in successfully fulfilling service composition requests by identifying suitable services. When compared to recently introduced methods, our hybrid approach yields significant benefits. On average, it reduces energy consumption by 17.11%, enhances availability and reliability by 8.27% and 4.52%, respectively, and reduces the average cost by 21.56%.}, } @article {pmid37631746, year = {2023}, author = {Alasmari, MK and Alwakeel, SS and Alohali, YA}, title = {A Multi-Classifiers Based Algorithm for Energy Efficient Tasks Offloading in Fog Computing.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {16}, pages = {}, pmid = {37631746}, issn = {1424-8220}, abstract = {The IoT has connected a vast number of devices on a massive internet scale. With the rapid increase in devices and data, offloading tasks from IoT devices to remote Cloud data centers becomes unproductive and costly.
Optimizing energy consumption in IoT devices while meeting deadlines and data constraints is challenging. Fog Computing aids efficient IoT task processing through proximity to nodes and lower service delay. Cloud task offloading occurs frequently due to Fog Computing's limited resources compared to the remote Cloud, necessitating improved techniques for the accurate categorization and distribution of IoT device task offloading in a hybrid IoT, Fog, and Cloud paradigm. This article explores relevant offloading strategies in Fog Computing and proposes MCEETO, an intelligent energy-aware allocation strategy, utilizing a multi-classifier-based algorithm for efficient task offloading by selecting optimal Fog Devices (FDs) for module placement. The MCEETO decision parameters include task attributes, Fog node characteristics, network latency, and bandwidth. The method is evaluated using the iFogSim simulator and compared with edge-ward and Cloud-only strategies. The proposed solution is more energy-efficient, saving around 11.36% compared to the Cloud-only strategy and approximately 9.30% compared to the edge-ward strategy. Additionally, the MCEETO algorithm achieved 67% and 96% reductions in network usage compared to the two strategies.}, } @article {pmid37631678, year = {2023}, author = {Ashraf, M and Shiraz, M and Abbasi, A and Alqahtani, O and Badshah, G and Lasisi, A}, title = {Microservice Application Scheduling in Multi-Tiered Fog-Computing-Enabled IoT.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {16}, pages = {}, pmid = {37631678}, issn = {1424-8220}, support = {Grant number will be provide later.//Funder details will be provided later./ ; RGP2/394/44//Deanship of Scientific Research at King Khalid University/ ; }, abstract = {Fog computing extends mobile cloud computing facilities at the network edge, yielding low-latency application execution. To supplement cloud services, computationally intensive applications can be distributed on resource-constrained mobile devices by leveraging underutilized nearby resources to meet the latency and bandwidth requirements of application execution. Building upon this premise, it is necessary to investigate idle or underutilized resources present at the edge of the network. The utilization of a microservice architecture in IoT application development, with its increased granularity in service breakdown, provides opportunities for improved scalability, maintainability, and extensibility. In this research, the proposed scheduler tackles the latency requirements of applications by identifying suitable upward migrations of microservices within a multi-tiered fog computing infrastructure. This approach enables optimal utilization of network edge resources. Experimental validation is performed using the iFogSim2 simulator and the results are compared with existing baselines.
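The MCEETO entry above learns where to place a task from features such as task attributes, node characteristics, latency, and bandwidth. The sketch below shows the general shape of such a classifier-driven offloading decision using a single scikit-learn decision tree on synthetic data; the features, labelling rule, and model choice are all assumptions, not the paper's trained multi-classifier ensemble.

```python
# Sketch of a classifier-driven offloading decision in the spirit of
# MCEETO: learn a placement (device / fog / cloud) from task and network
# features. Features, labels, and thresholds are synthetic assumptions.
import numpy as np
from sklearn.tree import DecisionTreeClassifier

rng = np.random.default_rng(0)
# Features: [task_size_MB, deadline_ms, fog_cpu_load, bandwidth_Mbps]
X = rng.uniform([1, 10, 0.0, 1], [50, 500, 1.0, 100], size=(500, 4))
# Toy labelling rule standing in for ground-truth placements:
# tight deadlines stay on the device, big tasks with good bandwidth go
# to the cloud, everything else goes to the fog.
y = np.where(X[:, 1] < 50, "device",
             np.where((X[:, 0] > 30) & (X[:, 3] > 50), "cloud", "fog"))

clf = DecisionTreeClassifier(max_depth=4).fit(X, y)
task = [[25.0, 200.0, 0.4, 80.0]]  # one incoming task
print(clf.predict(task))           # e.g., ['fog']
```

A multi-classifier variant would train several such models and combine their votes before committing a module to a fog device, which is the step the abstract credits for its energy savings.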
The results demonstrate that, compared to the edgewards approach, our proposed technique significantly improves application execution latency, network usage, and energy consumption by 66.92%, 69.83%, and 4.16%, respectively.}, } @article {pmid37631666, year = {2023}, author = {Xiong, H and Yu, B and Yi, Q and He, C}, title = {End-Cloud Collaboration Navigation Planning Method for Unmanned Aerial Vehicles Used in Small Areas.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {16}, pages = {}, pmid = {37631666}, issn = {1424-8220}, abstract = {Unmanned aerial vehicle (UAV) collaboration has become the main means of indoor and outdoor regional search, railway patrol, and other tasks, and navigation planning is one of its key, albeit difficult, technologies. The purpose of UAV navigation planning is to plan reasonable trajectories for UAVs to avoid obstacles and reach the task area. Essentially, it is a complex optimization problem that requires navigation planning algorithms to search for path-point solutions that meet the requirements under the guidance of objective functions and constraints. At present, there are autonomous navigation modes of UAVs relying on airborne sensors and navigation control modes relying on ground control stations (GCSs). However, due to the limited computing power of airborne processors and the communication delay of background command and control, a navigation planning method that takes into account both accuracy and timeliness is needed. First, the navigation planning architecture of UAVs with end-cloud collaboration was designed. Then, the background cloud navigation planning algorithm of the UAVs was designed based on improved particle swarm optimization (PSO). Next, the navigation control algorithm of the UAV terminals was designed based on a multi-objective hybrid swarm intelligence optimization algorithm. Finally, computer simulations and an actual indoor-environment flight test based on small rotor UAVs were designed and conducted. The results showed that the proposed method is correct and feasible, and can improve the effectiveness and efficiency of UAV navigation planning.}, } @article {pmid37630088, year = {2023}, author = {Chen, J and Qiu, L and Zhu, Z and Sun, N and Huang, H and Ip, WH and Yung, KL}, title = {An Adaptive Infrared Small-Target-Detection Fusion Algorithm Based on Multiscale Local Gradient Contrast for Remote Sensing.}, journal = {Micromachines}, volume = {14}, number = {8}, pages = {}, pmid = {37630088}, issn = {2072-666X}, support = {2022296//Youth Innovation Promotion Association of the Chinese Academy of Sciences/ ; }, abstract = {Space vehicles such as missiles and aircraft have relatively long tracking distances. Infrared (IR) detectors are used for small target detection, where the target presents point-target characteristics, lacking contour, shape, and texture information. High-brightness cloud edges and high noise affect the detection of small targets because of the complex background of the sky and ground environment. Traditional template-based filtering and local contrast-based methods do not distinguish between different complex background environments; their strategy is to apply uniform small-target template detection or absolute contrast differences, so they are prone to high false-alarm rates. It is necessary to study detection and tracking methods for complex backgrounds and low signal-to-clutter ratios (SCRs).
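The end-cloud UAV entry above builds its cloud-side planner on an improved particle swarm optimization. A plain, textbook PSO is sketched below on a toy waypoint objective; the inertia and acceleration coefficients are common default values, and nothing here reproduces the paper's "improved" variant or its multi-objective terminal algorithm.

```python
# Plain particle swarm optimization (PSO) sketch of the kind the UAV
# navigation entry builds on. The toy 2-D cost stands in for a trajectory
# objective; w, c1, c2 are textbook values, not the paper's tuned ones.
import numpy as np

def pso(cost, dim=2, n_particles=30, iters=200, seed=0):
    rng = np.random.default_rng(seed)
    pos = rng.uniform(-5, 5, (n_particles, dim))
    vel = np.zeros_like(pos)
    pbest, pbest_val = pos.copy(), np.apply_along_axis(cost, 1, pos)
    gbest = pbest[pbest_val.argmin()].copy()
    w, c1, c2 = 0.72, 1.49, 1.49          # inertia and acceleration terms
    for _ in range(iters):
        r1, r2 = rng.random(pos.shape), rng.random(pos.shape)
        vel = w * vel + c1 * r1 * (pbest - pos) + c2 * r2 * (gbest - pos)
        pos += vel
        vals = np.apply_along_axis(cost, 1, pos)
        improved = vals < pbest_val
        pbest[improved], pbest_val[improved] = pos[improved], vals[improved]
        gbest = pbest[pbest_val.argmin()].copy()
    return gbest, pbest_val.min()

# Toy objective: squared distance to a goal waypoint at (3, -2).
goal = np.array([3.0, -2.0])
best, val = pso(lambda p: np.sum((p - goal) ** 2))
print(best.round(3), round(float(val), 6))  # ~[ 3. -2.], ~0.0
```

In a real planner the cost would encode path length, obstacle clearance, and kinematic constraints over a whole sequence of waypoints rather than a single point.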
We use the complexity difference as a prior condition for detection against backgrounds of thick clouds and bright ground buildings. Then, we use a joint algorithm of spatial-domain filtering and improved local contrast to obtain salient areas. We also provide a new definition of gradient uniformity by improving the local gradient method, which further enhances target contrast and helps distinguish small targets from highlighted background edges and noise. Furthermore, the method can be used for parallel computing. Compared with traditional spatial filtering or local contrast algorithms, the flexible fusion strategy can achieve rapid detection of small targets with a higher signal-to-clutter ratio gain (SCRG) and background suppression factor (BSF).}, } @article {pmid37627775, year = {2023}, author = {Choi, W and Choi, T and Heo, S}, title = {A Comparative Study of Automated Machine Learning Platforms for Exercise Anthropometry-Based Typology Analysis: Performance Evaluation of AWS SageMaker, GCP VertexAI, and MS Azure.}, journal = {Bioengineering (Basel, Switzerland)}, volume = {10}, number = {8}, pages = {}, pmid = {37627775}, issn = {2306-5354}, support = {INNO-2022-01//National Research Foundation of Korea/ ; }, abstract = {The increasing prevalence of machine learning (ML) and automated machine learning (AutoML) applications across diverse industries necessitates rigorous comparative evaluations of their predictive accuracies under various computational environments. The purpose of this research was to compare and analyze the predictive accuracy of several machine learning algorithms, including RNNs, LSTMs, GRUs, XGBoost, and LightGBM, when implemented on different platforms such as Google Colab Pro, AWS SageMaker, GCP Vertex AI, and MS Azure. The predictive performance of each model within its respective environment was assessed using performance metrics such as accuracy, precision, recall, F1-score, and log loss. All algorithms were trained on the same dataset and implemented on their specified platforms to ensure consistent comparisons. The dataset used in this study comprised fitness images, encompassing 41 exercise types and totaling 6 million samples. These images were acquired from AI-hub, and joint coordinate values (x, y, z) were extracted utilizing the Mediapipe library. The extracted values were then stored in a CSV format. Among the ML algorithms, LSTM demonstrated the highest performance, achieving an accuracy of 73.75%, precision of 74.55%, recall of 73.68%, F1-score of 73.11%, and a log loss of 0.71. Conversely, among the AutoML algorithms, XGBoost performed exceptionally well on AWS SageMaker, boasting an accuracy of 99.6%, precision of 99.8%, recall of 99.2%, F1-score of 99.5%, and a log loss of 0.014. On the other hand, LightGBM exhibited the poorest performance on MS Azure, achieving an accuracy of 84.2%, precision of 82.2%, recall of 81.8%, F1-score of 81.5%, and a log loss of 1.176. The unnamed algorithm implemented on GCP Vertex AI showcased relatively favorable results, with an accuracy of 89.9%, precision of 94.2%, recall of 88.4%, F1-score of 91.2%, and a log loss of 0.268. Despite LightGBM's lackluster performance on MS Azure, the GRU implemented in Google Colab Pro displayed encouraging results, yielding an accuracy of 88.2%, precision of 88.5%, recall of 88.1%, F1-score of 88.4%, and a log loss of 0.44.
Overall, this study revealed significant variations in performance across different algorithms and platforms. Particularly, AWS SageMaker's implementation of XGBoost outperformed other configurations, highlighting the importance of carefully considering the choice of algorithm and computational environment in predictive tasks. To gain a comprehensive understanding of the factors contributing to these performance discrepancies, further investigations are recommended.}, } @article {pmid37624874, year = {2022}, author = {Hoang, V and Hung, LH and Perez, D and Deng, H and Schooley, R and Arumilli, N and Yeung, KY and Lloyd, W}, title = {Container Profiler: Profiling resource utilization of containerized big data pipelines.}, journal = {GigaScience}, volume = {12}, number = {}, pages = {}, pmid = {37624874}, issn = {2047-217X}, support = {R01 GM126019/GM/NIGMS NIH HHS/United States ; R03 AI159286/AI/NIAID NIH HHS/United States ; U24 HG012674/HG/NHGRI NIH HHS/United States ; }, mesh = {*Big Data ; Computational Biology ; Software ; Time Factors ; }, abstract = {BACKGROUND: This article presents the Container Profiler, a software tool that measures and records the resource usage of any containerized task. Our tool profiles the CPU, memory, disk, and network utilization of containerized tasks collecting over 60 Linux operating system metrics at the virtual machine, container, and process levels. The Container Profiler supports performing time-series profiling at a configurable sampling interval to enable continuous monitoring of the resources consumed by containerized tasks and pipelines.
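The Container Profiler entry above samples Linux operating system metrics at a configurable interval to build time-series profiles. The generic sketch below reads a couple of /proc counters on a fixed schedule to illustrate that pattern; it is Linux-only, and the real tool's 60-plus metrics at VM, container, and process levels are not reproduced here.

```python
# Generic sketch of interval-based resource sampling on Linux, the kind
# of time-series profiling the Container Profiler abstract describes.
# Reads two /proc sources only; not the tool's actual implementation.
import json
import time

def read_proc_sample() -> dict:
    with open("/proc/stat") as f:
        # First line: aggregate CPU jiffies (user, nice, system, idle, ...).
        cpu_jiffies = [int(v) for v in f.readline().split()[1:8]]
    mem = {}
    with open("/proc/meminfo") as f:
        for line in f:
            key, value = line.split(":")
            mem[key] = int(value.split()[0])     # values reported in kB
    return {"time": time.time(),
            "cpu_jiffies": cpu_jiffies,
            "mem_available_kb": mem.get("MemAvailable", 0)}

def profile(interval_s: float, samples: int):
    """Emit one JSON record per sampling interval (a time-series profile)."""
    for _ in range(samples):
        print(json.dumps(read_proc_sample()))
        time.sleep(interval_s)

if __name__ == "__main__":
    profile(interval_s=1.0, samples=5)
```

Because CPU jiffies are cumulative counters, utilization over an interval comes from differencing consecutive samples, which is also why the sampling interval trades profiling resolution against overhead.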

RESULTS: To investigate the utility of the Container Profiler, we profile the resource utilization requirements of a multistage bioinformatics analytical pipeline (RNA sequencing using unique molecular identifiers). We examine profiling metrics to assess patterns of CPU, disk, and network resource utilization across the different stages of the pipeline. We also quantify the profiling overhead of our Container Profiler tool to assess the impact of profiling a running pipeline with different levels of profiling granularity, verifying that impacts are negligible.

CONCLUSIONS: The Container Profiler provides a useful tool that can be used to continuously monitor the resource consumption of long and complex containerized applications that run locally or on the cloud. This can help identify bottlenecks where more resources are needed to improve performance.}, } @article {pmid37624836, year = {2023}, author = {Meri, A and Hasan, MK and Dauwed, M and Jarrar, M and Aldujaili, A and Al-Bsheish, M and Shehab, S and Kareem, HM}, title = {Organizational and behavioral attributes' roles in adopting cloud services: An empirical study in the healthcare industry.}, journal = {PloS one}, volume = {18}, number = {8}, pages = {e0290654}, pmid = {37624836}, issn = {1932-6203}, mesh = {*Cloud Computing ; *Health Care Sector ; Behavior Control ; Internet ; Data Analysis ; }, abstract = {The need for cloud services has been raised globally to provide a platform for healthcare providers to efficiently manage their citizens' health records and thus provide treatment remotely. In Iraq, the healthcare records of public hospitals are increasing progressively with poor digital management. While recent works indicate cloud computing as a platform for all sectors globally, a lack of empirical evidence demands a comprehensive investigation to identify the significant factors that influence the utilization of cloud health computing. Here we provide a cost-effective, modular, and computationally efficient model of utilizing cloud computing based on the organization theory and the theory of reasoned action perspectives. Data from a total of 105 key informants were analyzed. Partial least squares structural equation modeling was used for data analysis to explore the effect of organizational structure variables on healthcare information technicians' behaviors to utilize cloud services. Empirical results revealed that Internet networks, software modularity, hardware modularity, and training availability significantly influence information technicians' behavioral control and confirmation. Furthermore, these factors positively impacted their utilization of cloud systems, while behavioral control had no significant effect. The importance-performance map analysis further confirms that these factors exhibit high importance in shaping user utilization. Our findings can provide a comprehensive and unified guide to policymakers in the healthcare industry by focusing on the significant factors in organizational and behavioral contexts to engage health information technicians in the development and implementation phases.}, } @article {pmid37623505, year = {2023}, author = {Gazerani, P}, title = {Intelligent Digital Twins for Personalized Migraine Care.}, journal = {Journal of personalized medicine}, volume = {13}, number = {8}, pages = {}, pmid = {37623505}, issn = {2075-4426}, abstract = {Intelligent digital twins closely resemble their real-life counterparts. In health and medical care, they enable the real-time monitoring of patients, whereby large amounts of data can be collected to produce actionable information. These powerful tools are constructed with the aid of artificial intelligence, machine learning, and deep learning; the Internet of Things; and cloud computing to collect a diverse range of digital data (e.g., from digital patient journals, wearable sensors, and digitized monitoring equipment or processes), which can provide information on the health conditions and therapeutic responses of their physical twins.
Intelligent digital twins can enable data-driven clinical decision making and advance the realization of personalized care. Migraine is a highly prevalent and complex neurological disorder affecting people of all ages, genders, and geographical locations. It is ranked among the top disabling diseases, with substantial negative personal and societal impacts, but current treatment strategies are suboptimal. Personalized care for migraine has been suggested to optimize its treatment. The implementation of intelligent digital twins for migraine care can theoretically be beneficial in supporting patient-centric care management. It is also expected that the implementation of intelligent digital twins will reduce costs in the long run and enhance treatment effectiveness. This study briefly reviews the concept of digital twins and the available literature on digital twins for health disorders such as neurological diseases. Based on these, the potential construction and utility of digital twins for migraine will then be presented. The potential and challenges of implementing intelligent digital twins for the future management of migraine are also discussed.}, } @article {pmid37621906, year = {2023}, author = {Chemistry, IJOA}, title = {Retracted: Residential Environment Pollution Monitoring System Based on Cloud Computing and Internet of Things.}, journal = {International journal of analytical chemistry}, volume = {2023}, number = {}, pages = {9858523}, pmid = {37621906}, issn = {1687-8760}, abstract = {[This retracts the article DOI: 10.1155/2022/1013300.].}, } @article {pmid37602873, year = {2023}, author = {Ahmed, MW and Hossainy, SJ and Khaliduzzaman, A and Emmert, JL and Kamruzzaman, M}, title = {Non-destructive optical sensing technologies for advancing the egg industry toward Industry 4.0: A review.}, journal = {Comprehensive reviews in food science and food safety}, volume = {22}, number = {6}, pages = {4378-4403}, doi = {10.1111/1541-4337.13227}, pmid = {37602873}, issn = {1541-4337}, mesh = {Animals ; Humans ; *Artificial Intelligence ; Quality Control ; *Animal Welfare ; Big Data ; }, abstract = {The egg is considered one of the best sources of dietary protein, and has an important role in human growth and development. With the increase in the world's population, per capita egg consumption is also increasing. Ground-breaking technological developments have led to numerous inventions like the Internet of Things (IoT), various optical sensors, robotics, artificial intelligence (AI), big data, and cloud computing, transforming the conventional industry into a smart and sustainable egg industry, also known as Egg Industry 4.0 (EI 4.0). The EI 4.0 concept has the potential to improve automation, enhance biosecurity, promote the safeguarding of animal welfare, increase intelligent grading and quality inspection, and increase efficiency. For a sustainable Industry 4.0 transformation, it is important to analyze available technologies, the latest research, existing limitations, and prospects. This review examines the existing non-destructive optical sensing technologies for the egg industry. It provides information and insights on the different components of EI 4.0, including emerging EI 4.0 technologies for egg production, quality inspection, and grading. Furthermore, drawbacks of current EI 4.0 technologies, potential workarounds, and future trends were critically analyzed.
This review can help policymakers, industrialists, and academicians to better understand the integration of non-destructive technologies and automation. This integration has the potential to increase productivity, improve quality control, and optimize resource management toward the sustainable development of the egg industry.}, } @article {pmid37593602, year = {2023}, author = {Rodrigues de Almeida, C and Garcia, N and Campos, JC and Alírio, J and Arenas-Castro, S and Gonçalves, A and Sillero, N and Teodoro, AC}, title = {Time-series analyses of land surface temperature changes with Google Earth Engine in a mountainous region.}, journal = {Heliyon}, volume = {9}, number = {8}, pages = {e18846}, pmid = {37593602}, issn = {2405-8440}, abstract = {Studying changes in temperature is fundamental for understanding its interactions with the environment and biodiversity. However, studies in mountainous areas are few, due to their complex formation and the difficulty of obtaining local data. We analysed changes in temperature over time in Montesinho Natural Park (MNP) (Bragança, Portugal), an important conservation area due to its high level of biodiversity. Specifically, we aimed to analyse: i) whether temperature increased in MNP over time, ii) what environmental factors influence the Land Surface Temperature (LST), and iii) whether vegetation is related to changes in temperature. We used annual summer and winter mean data acquired from the Moderate-Resolution Imaging Spectroradiometer (MODIS) datasets/products (e.g., LST, gathered at four different times: 11 am, 1 pm, 10 pm, and 2 am; the Enhanced Vegetation Index (EVI); and Evapotranspiration (ET)), available on the cloud-based platform Google Earth Engine, between 2003 and 2021. We analysed the dynamics of the temporal trend patterns between the LST and local thermal data (from a weather station) by correlations; the trends in LST over time with the Mann-Kendall trend test; and the stability of hot spots and cold spots of LST with Local Statistics of Spatial Association (LISA) tests. The temporal trend patterns between LST and Air Temperature (Tair) data were very similar (ρ > 0.7). The temperature in the MNP remained stable over time during summer but increased during winter nights. The biophysical indices were strongly correlated with the summer LST at 11 am and 1 pm. The LISA results identified hot and cold zones that remained stable over time.
The remotely sensed data proved effective for measuring changes in temperature over time.}, } @article {pmid37593394, year = {2023}, author = {Environmental And Public Health, JO}, title = {Retracted: Sport Resource Classification Algorithm for Health Promotion Based on Cloud Computing: Rhythmic Gymnastics' Example.}, journal = {Journal of environmental and public health}, volume = {2023}, number = {}, pages = {9831318}, pmid = {37593394}, issn = {1687-9813}, abstract = {[This retracts the article DOI: 10.1155/2022/2587169.].}, } @article {pmid37593082, year = {2023}, author = {Intelligence And Neuroscience, C}, title = {Retracted: Computer Security Issues and Legal System Based on Cloud Computing.}, journal = {Computational intelligence and neuroscience}, volume = {2023}, number = {}, pages = {9892354}, pmid = {37593082}, issn = {1687-5273}, abstract = {[This retracts the article DOI: 10.1155/2022/8112212.].}, } @article {pmid37587979, year = {2023}, author = {Alomair, L and Abolfotouh, MA}, title = {Awareness and Predictors of the Use of Bioinformatics in Genome Research in Saudi Arabia.}, journal = {International journal of general medicine}, volume = {16}, number = {}, pages = {3413-3425}, pmid = {37587979}, issn = {1178-7074}, abstract = {BACKGROUND: Despite advances in genomics research, many countries still lack bioinformatics skills. This study aimed to assess the levels of awareness of bioinformatics and the predictors of its use in genomics research among scientists in Saudi Arabia.
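The Montesinho LST entry above applies the Mann-Kendall trend test to its temperature series. A minimal implementation of that test is sketched below; it omits the tie and autocorrelation corrections that a production analysis of real LST series would normally include, and the sample series is a toy.

```python
# Minimal Mann-Kendall trend test, the statistic the LST study applies to
# its temperature time series. No tie/autocorrelation corrections here.
import numpy as np
from scipy.stats import norm

def mann_kendall(series):
    x = np.asarray(series, dtype=float)
    n = len(x)
    # S counts concordant minus discordant pairs across the series.
    s = sum(np.sign(x[j] - x[i]) for i in range(n - 1) for j in range(i + 1, n))
    var_s = n * (n - 1) * (2 * n + 5) / 18.0     # variance assuming no ties
    if s > 0:
        z = (s - 1) / np.sqrt(var_s)
    elif s < 0:
        z = (s + 1) / np.sqrt(var_s)
    else:
        z = 0.0
    p = 2 * (1 - norm.cdf(abs(z)))               # two-sided p-value
    return s, z, p

winter_night_lst = [2.1, 2.3, 2.2, 2.6, 2.8, 2.7, 3.0, 3.1, 3.3]  # toy series
s, z, p = mann_kendall(winter_night_lst)
print(f"S={s}, Z={z:.2f}, p={p:.4f}")  # increasing trend -> positive Z, small p
```

A significantly positive Z on winter-night series is exactly the kind of result behind the study's finding that temperatures increased during winter nights.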

METHODS: In a cross-sectional survey, 309 scientists from different biological and biomedical specialties completed a previously validated e-questionnaire collecting data on (1) knowledge of bioinformatics programming languages and tools, (2) attitude toward the acceptance of bioinformatics resources in genome-related research, and (3) the pattern of information-seeking with respect to online bioinformatics resources. Logistic regression analysis was applied to identify the predictors of using bioinformatics in research. Significance was set at p<0.05.

RESULTS: More than one-half (248, 56.4%) of all scientists reported a lack of bioinformatics knowledge. Most participants had a neutral attitude toward bioinformatics (295, 95.4%). The reported barriers to the acceptance of bioinformatics tools were: lack of training (210, 67.9%), insufficient support (180, 58.2%), and complexity of software (138, 44.6%). Limited experience was reported in: having one or more bioinformatics tools (98, 31.7%); using a supercomputer in their research inside (44, 14.2%) or outside (55, 17.8%) Saudi Arabia; needing to develop a program to solve a biological problem (129, 41.7%); working in one or more fields of bioinformatics (93, 30.1%); using web applications (112, 36.2%); and using programming languages (102, 33.0%). Significant predictors of conducting genomics research were: younger age (p=0.039), Ph.D. education (p=0.003), more than five years of experience (p<0.05), previous training (p<0.001), and higher bioinformatics knowledge scores (p<0.001).

CONCLUSION: The study revealed limited knowledge, a neutral attitude, a lack of resources, and limited use of bioinformatics resources in genomics research. Education and training at every educational level and on the job are recommended. Cloud-based resources may help scientists do research using publicly available omics data. Further studies are necessary to evaluate collaboration between bioinformatics software developers and biologists.}, } @article {pmid37586146, year = {2023}, author = {Manson, EN and Hasford, F and Trauernicht, C and Ige, TA and Inkoom, S and Inyang, S and Samba, O and Khelassi-Toutaoui, N and Lazarus, G and Sosu, EK and Pokoo-Aikins, M and Stoeva, M}, title = {Africa's readiness for artificial intelligence in clinical radiotherapy delivery: Medical physicists to lead the way.}, journal = {Physica medica : PM : an international journal devoted to the applications of physics to medicine and biology : official journal of the Italian Association of Biomedical Physics (AIFB)}, volume = {113}, number = {}, pages = {102653}, doi = {10.1016/j.ejmp.2023.102653}, pmid = {37586146}, issn = {1724-191X}, mesh = {Humans ; *Artificial Intelligence ; *Radiation Oncology ; Machine Learning ; Curriculum ; Africa ; }, abstract = {BACKGROUND: There have been several proposals by researchers for the introduction of Artificial Intelligence (AI) technology due to its promising role in radiotherapy practice. However, prior to the introduction of the technology, there are certain general recommendations that must be met. Also, the current challenges of AI must be addressed. In this review, we assess how prepared Africa is for the integration of AI technology into radiotherapy service delivery.

METHODS: To assess the readiness of Africa for integration of AI in radiotherapy services delivery, a narrative review of the available literature from PubMed, Science Direct, Google Scholar, and Scopus was conducted in the English language using search terms such as Artificial Intelligence, Radiotherapy in Africa, Machine Learning, Deep Learning, and Quality Assurance.

RESULTS: We identified a number of issues that could limit the successful integration of AI technology into radiotherapy practice. The major issues include insufficient data for the training and validation of AI models, the lack of an educational curriculum for AI radiotherapy-related courses, few or no AI teaching professionals, limited funding, and a lack of AI technology and resources. Solutions identified to facilitate the smooth implementation of the technology into radiotherapy practice within the region include: creating an accessible national data bank, integrating AI radiotherapy training programs into Africa's educational curriculum, investing in AI technology and resources such as electronic health records and cloud storage, and creating laws and policies to support the use of the technology. These solutions need to be implemented against a background of creating awareness among health workers within the radiotherapy space.

CONCLUSION: The challenges identified in this review are common to all the geographical regions of the African continent. Therefore, all institutions offering radiotherapy education and training programs, the management of medical centers for radiotherapy and oncology, national and regional professional bodies for medical physics, ministries of health, governments, and relevant stakeholders must take a keen interest and work together to achieve this goal.}, } @article {pmid37579550, year = {2023}, author = {Aminizadeh, S and Heidari, A and Toumaj, S and Darbandi, M and Navimipour, NJ and Rezaei, M and Talebi, S and Azad, P and Unal, M}, title = {The applications of machine learning techniques in medical data processing based on distributed computing and the Internet of Things.}, journal = {Computer methods and programs in biomedicine}, volume = {241}, number = {}, pages = {107745}, doi = {10.1016/j.cmpb.2023.107745}, pmid = {37579550}, issn = {1872-7565}, mesh = {Humans ; *COVID-19 ; *Internet of Things ; Algorithms ; Cloud Computing ; Machine Learning ; }, abstract = {Medical data processing has grown into a prominent topic in recent decades, with the primary goal of maintaining patient data via new information technologies, including the Internet of Things (IoT) and sensor technologies, which generate patient indexes in hospital data networks. Innovations like distributed computing, Machine Learning (ML), blockchain, chatbots, wearables, and pattern recognition can adequately enable the collection and processing of medical data for decision-making in the healthcare era. In particular, distributed computing assists experts in the disease diagnostic process by digesting huge volumes of data swiftly and producing personalized smart suggestions. On the other side, the world has been confronting the outbreak of COVID-19, so early diagnosis techniques are crucial to lowering the fatality rate. ML systems are beneficial in aiding radiologists in examining the incredible amount of medical images. Nevertheless, they demand a huge quantity of training data that must be unified for processing. Hence, developing Deep Learning (DL) confronts multiple issues, such as conventional data collection, quality assurance, knowledge exchange, privacy preservation, administrative laws, and ethical considerations. In this research, we provide a comprehensive analysis of the most recent studies of distributed computing platform applications based on five categorized platforms: cloud computing, edge, fog, IoT, and hybrid platforms. We evaluated 27 articles regarding the usage of the proposed frameworks, deployed methods, and applications, noting their advantages and drawbacks and the applied datasets, and screening for the security mechanisms used and the presence of Transfer Learning (TL) methods. As a result, it was found that most recent research (about 43%) used the IoT platform as the environment for the proposed architecture, and most of the studies (about 46%) were done in 2021. In addition, the most popular DL algorithm was the Convolutional Neural Network (CNN), at 19.4%. Hence, however technology changes, delivering appropriate therapy for patients remains the primary aim of healthcare-associated departments.
Therefore, further studies are recommended to develop more functional architectures based on DL and distributed environments, and to better evaluate the present healthcare data analysis models.}, } @article {pmid37576291, year = {2023}, author = {Hu, X}, title = {The role of deep learning in the innovation of smart classroom teaching mode under the background of internet of things and fuzzy control.}, journal = {Heliyon}, volume = {9}, number = {8}, pages = {e18594}, pmid = {37576291}, issn = {2405-8440}, abstract = {Electronic components are rapidly updated in the context of expanding application requirements, and communication protocols used in combination with various electronic devices are also emerging. On this basis, IoT technology has produced a variety of sensor devices and gateways, which are widely used in smart cities. Applying IoT technology to classrooms can effectively remedy the deficiencies of traditional teaching models. Fuzzy control theory is usually based on mathematical fuzzy sets and is combined with neural network, genetic, and probabilistic algorithms to form a calculation method. Fuzzy calculation can simplify system inputs comprising a variety of complex variables, and its main application in education is evaluating teachers' teaching effectiveness. The advancement of science and technology has promoted the change and updating of teaching modes. With the continuous advancement of basic education curriculum reform and the continuous deepening of classroom teaching reform, classroom teaching is also in urgent need of reform, from traditional classrooms to smart classrooms. Smart classrooms combine advanced technology with teachers' teaching: by analyzing dynamic data, they instantly capture students' learning situations and feed this into education and teaching in a targeted manner. This paper conducts a questionnaire survey on the current situation of smart classroom teaching and summarizes the current teaching problems. Then, combining Internet of Things, fuzzy control, and deep learning technology, measures are proposed, from the perspectives of both teachers and students, for smart classrooms to promote students' learning outcomes. With its novel teaching advantages, the smart classroom has gradually entered the public eye and gained the attention and support of most educators. Taking the Grand Wisdom Classroom as an example, it uses "Internet +" thinking and a new generation of information technologies such as big data and cloud computing to create intelligent and efficient classrooms, realizing whole-process application before, during, and after class and promoting the development of students' wisdom. Under the mobile Internet model, students and teachers can communicate anytime and anywhere. Combined with the analysis and application of big data technology, data-based precision teaching becomes possible.
In a real sense, learning can come before teaching, and teaching can be guided by learning.}, } @article {pmid37575364, year = {2023}, author = {Sauerwein, N and Orsi, F and Uhrich, P and Bandyopadhyay, S and Mattiotti, F and Cantat-Moltrecht, T and Pupillo, G and Hauke, P and Brantut, JP}, title = {Engineering random spin models with atoms in a high-finesse cavity.}, journal = {Nature physics}, volume = {19}, number = {8}, pages = {1128-1134}, pmid = {37575364}, issn = {1745-2473}, abstract = {All-to-all interacting, disordered quantum many-body models have a wide range of applications across disciplines, from spin glasses in condensed-matter physics over holographic duality in high-energy physics to annealing algorithms in quantum computing. Typically, these models are abstractions that do not find unambiguous physical realizations in nature. Here we realize an all-to-all interacting, disordered spin system by subjecting an atomic cloud in a cavity to a controllable light shift. Adjusting the detuning between atom resonance and cavity mode, we can tune between disordered versions of a central-mode model and a Lipkin-Meshkov-Glick model. By spectroscopically probing the low-energy excitations of the system, we explore the competition of interactions with disorder across a broad parameter range. We show how disorder in the central-mode model breaks the strong collective coupling, making the dark-state manifold cross over to a random distribution of weakly mixed light-matter, 'grey', states. In the Lipkin-Meshkov-Glick model, the ferromagnetic finite-sized ground state evolves towards a paramagnet as disorder is increased. In that regime, semi-localized eigenstates emerge, as we observe by extracting bounds on the participation ratio. These results present substantial steps towards freely programmable cavity-mediated interactions for the design of arbitrary spin Hamiltonians.}, } @article {pmid37571718, year = {2023}, author = {Torres-Hernández, MA and Escobedo-Barajas, MH and Guerrero-Osuna, HA and Ibarra-Pérez, T and Solís-Sánchez, LO and Martínez-Blanco, MDR}, title = {Performance Analysis of Embedded Multilayer Perceptron Artificial Neural Networks on Smart Cyber-Physical Systems for IoT Environments.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {15}, pages = {}, pmid = {37571718}, issn = {1424-8220}, support = {CPE/COTEBAL/38/2022//Instituto Politécnico Nacional/ ; 1012152//Consejo Nacional de Ciencia y Tecnología/ ; }, abstract = {At present, modern society is experiencing a significant transformation, thanks to the digitization of society and manufacturing driven by a combination of technologies such as the Internet of Things, cloud computing, machine learning, and smart cyber-physical systems, which are making the smart factory and Industry 4.0 a reality. Currently, most of the intelligence of smart cyber-physical systems is implemented in software. For this reason, in this work we focused on the artificial intelligence software design of this technology, one of its most complex and critical aspects. This research aimed to study and compare the performance of a multilayer perceptron artificial neural network designed to solve the problem of character recognition in three implementation technologies: personal computers, cloud computing environments, and smart cyber-physical systems. After training and testing the multilayer perceptron, training-time and accuracy tests showed that each technology has particular characteristics and performance.
Nevertheless, the three technologies have a similar performance of 97% accuracy, despite a difference in the training time. The results show that the artificial intelligence embedded in fog technology is a promising alternative for developing smart cyber-physical systems.}, } @article {pmid37571716, year = {2023}, author = {Fernández-Urrutia, M and Arbelo, M and Gil, A}, title = {Identification of Paddy Croplands and Its Stages Using Remote Sensors: A Systematic Review.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {15}, pages = {}, pmid = {37571716}, issn = {1424-8220}, abstract = {Rice is a staple food that feeds nearly half of the world's population. With the population of our planet expected to keep growing, it is crucial to carry out accurate mapping, monitoring, and assessments since these could significantly impact food security, climate change, spatial planning, and land management. Using the PRISMA systematic review protocol, this article identified and selected 122 scientific articles (journals papers and conference proceedings) addressing different remote sensing-based methodologies to map paddy croplands, published between 2010 and October 2022. This analysis includes full coverage of the mapping of rice paddies and their various stages of crop maturity. This review paper classifies the methods based on the data source: (a) multispectral (62%), (b) multisource (20%), and (c) radar (18%). Furthermore, it analyses the impact of machine learning on those methodologies and the most common algorithms used. We found that MODIS (28%), Sentinel-2 (18%), Sentinel-1 (15%), and Landsat-8 (11%) were the most used sensors. The impact of Sentinel-1 on multisource solutions is also increasing due to the potential of backscatter information to determine textures in different stages and decrease cloud cover constraints. The preferred solutions include phenology algorithms via the use of vegetation indices, setting thresholds, or applying machine learning algorithms to classify images. In terms of machine learning algorithms, random forest is the most used (17 times), followed by support vector machine (12 times) and isodata (7 times). With the continuous development of technology and computing, it is expected that solutions such as multisource solutions will emerge more frequently and cover larger areas in different locations and at a higher resolution. In addition, the continuous improvement of cloud detection algorithms will positively impact multispectral solutions.}, } @article {pmid37571695, year = {2023}, author = {Ahamed, Z and Khemakhem, M and Eassa, F and Alsolami, F and Basuhail, A and Jambi, K}, title = {Deep Reinforcement Learning for Workload Prediction in Federated Cloud Environments.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {15}, pages = {}, pmid = {37571695}, issn = {1424-8220}, support = {RG-9-611-38//King Abdulaziz University/ ; }, abstract = {The Federated Cloud Computing (FCC) paradigm provides scalability advantages to Cloud Service Providers (CSP) in preserving their Service Level Agreement (SLA) as opposed to single Data Centers (DC). However, existing research has primarily focused on Virtual Machine (VM) placement, with less emphasis on energy efficiency and SLA adherence. In this paper, we propose a novel solution, Federated Cloud Workload Prediction with Deep Q-Learning (FEDQWP). 
Our solution addresses the complex VM placement problem, energy efficiency, and SLA preservation, making it comprehensive and beneficial for CSPs. By leveraging the capabilities of deep learning, our FEDQWP model extracts underlying patterns and optimizes resource allocation. Real-world workloads are extensively evaluated to demonstrate the efficacy of our approach compared to existing solutions. The results show that our DQL model outperforms other algorithms in terms of CPU utilization, migration time, finished tasks, energy consumption, and SLA violations. Specifically, our Q-learning model achieves efficient CPU utilization with a median value of 29.02, completes migrations in an average of 0.31 units, finishes an average of 699 tasks, consumes the least energy with an average of 1.85 kWh, and exhibits the lowest number of SLA violations, with an average of 0.03 violations proportionally. These quantitative results highlight the superiority of our proposed method in optimizing performance in FCC environments.}, } @article {pmid37571606, year = {2023}, author = {Zhang, D and Zhong, Z and Xia, Y and Wang, Z and Xiong, W}, title = {An Automatic Classification System for Environmental Sound in Smart Cities.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {15}, pages = {}, pmid = {37571606}, issn = {1424-8220}, abstract = {With the continuous promotion of "smart cities" worldwide, how to combine smart cities with modern advanced technologies (the Internet of Things, cloud computing, artificial intelligence) has become a hot topic. However, due to the non-stationary nature of environmental sound and the interference of urban noise, it is challenging to fully extract features with a single model input and achieve ideal classification results, even with deep learning methods. To improve the recognition accuracy of ESC (environmental sound classification), we propose a dual-branch residual network (dual-resnet) based on feature fusion. Furthermore, in terms of data pre-processing, a loop-padding method is proposed to pad shorter clips, enabling the model to obtain more useful information. At the same time, to prevent overfitting, we use time-frequency data augmentation to expand the dataset. After uniform pre-processing of all the original audio, the dual-branch residual network automatically extracts the frequency-domain features of the log-Mel spectrogram and log-spectrogram. Then, the two different audio features are fused to make the representation of the audio features more comprehensive.
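The dual-resnet ESC entry above feeds two inputs, a log-Mel spectrogram and a log-spectrogram, and loop-pads short clips. The sketch below shows how those inputs are commonly computed with librosa; the FFT size, hop length, Mel-band count, and four-second target length are typical values, not the paper's exact settings.

```python
# How the two inputs of a dual-branch ESC model are commonly computed:
# a log-Mel spectrogram and a log (linear-frequency) spectrogram.
# Parameter values are typical defaults, not the paper's settings.
import numpy as np
import librosa

y, sr = librosa.load(librosa.ex("trumpet"))   # demo clip; substitute your audio

# Loop-padding (as named in the abstract): repeat a short clip until it
# reaches a fixed target length, instead of zero-padding.
target_len = 4 * sr
y = np.tile(y, int(np.ceil(target_len / len(y))))[:target_len]

# Branch 1: log-Mel spectrogram.
mel = librosa.feature.melspectrogram(y=y, sr=sr, n_fft=1024,
                                     hop_length=512, n_mels=64)
log_mel = librosa.power_to_db(mel, ref=np.max)

# Branch 2: log-magnitude spectrogram on a linear frequency axis.
stft = librosa.stft(y, n_fft=1024, hop_length=512)
log_spec = librosa.amplitude_to_db(np.abs(stft), ref=np.max)

print(log_mel.shape, log_spec.shape)  # (64, T) and (513, T)
```

The two representations emphasize different frequency resolutions, which is why fusing branch features can give a more comprehensive picture of an urban sound than either input alone.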
The experimental results show that, compared with other models, our approach improves classification accuracy on the UrbanSound8k dataset to varying degrees.}, } @article {pmid37571545, year = {2023}, author = {Ali, A and Al-Rimy, BAS and Alsubaei, FS and Almazroi, AA and Almazroi, AA}, title = {HealthLock: Blockchain-Based Privacy Preservation Using Homomorphic Encryption in Internet of Things Healthcare Applications.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {15}, pages = {}, pmid = {37571545}, issn = {1424-8220}, support = {MoE-IF-UJ-22-0708-2//Deputyship for Research Innovation, Ministry of Education in Saudi Arabia/ ; }, mesh = {Humans ; *Blockchain ; Privacy ; Computer Security ; *Internet of Things ; Delivery of Health Care ; }, abstract = {The swift advancement of the Internet of Things (IoT), coupled with the growing application of healthcare software in this area, has given rise to significant worries about the protection and confidentiality of critical health data. To address these challenges, blockchain technology has emerged as a promising solution, providing decentralized and immutable data storage and transparent transaction records. However, traditional blockchain systems still face limitations in terms of preserving data privacy. This paper proposes a novel approach to enhancing privacy preservation in IoT-based healthcare applications using homomorphic encryption techniques combined with blockchain technology. Homomorphic encryption facilitates the performance of calculations on encrypted data without requiring decryption, thus safeguarding the data's privacy throughout the computational process. The encrypted data can be processed and analyzed by authorized parties without revealing the actual contents, thereby protecting patient privacy. Furthermore, our approach incorporates smart contracts within the blockchain network to enforce access control and to define data-sharing policies. These smart contracts provide fine-grained permission settings, which ensure that only authorized entities can access and utilize the encrypted data. These settings protect the data from being viewed by unauthorized parties. In addition, our system generates an audit record of all data transactions, which improves both accountability and transparency. We have provided a comparative evaluation with the standard models, taking into account factors such as communication expense, transaction volume, and security. The findings of our experiments suggest that our strategy protects the confidentiality of the data while at the same time enabling effective data processing and analysis. In conclusion, the combination of homomorphic encryption and blockchain technology presents a solution that is both resilient and protective of users' privacy for healthcare applications integrated with IoT.
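The compute-on-ciphertext property central to the HealthLock abstract above can be demonstrated with any additively homomorphic scheme. A minimal sketch using the open-source python-paillier package (`phe`) as a stand-in; the readings and key size are illustrative, and this is not the paper's actual construction.

```python
from phe import paillier  # pip install phe (python-paillier)

public_key, private_key = paillier.generate_paillier_keypair(n_length=2048)

# A device encrypts two vital-sign readings before upload (values illustrative).
enc_a = public_key.encrypt(72)
enc_b = public_key.encrypt(68)

# The cloud computes on ciphertexts without ever decrypting them.
enc_sum = enc_a + enc_b      # homomorphic addition of two ciphertexts
enc_scaled = enc_a * 2       # multiplication by a plaintext scalar

# Only the key holder can recover the results.
assert private_key.decrypt(enc_sum) == 140
assert private_key.decrypt(enc_scaled) == 144
```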
This strategy offers a safe and open setting for the management and exchange of sensitive patient medical data, while simultaneously preserving the confidentiality of the patients involved.}, } @article {pmid37571543, year = {2023}, author = {Waleed, M and Kamal, T and Um, TW and Hafeez, A and Habib, B and Skouby, KE}, title = {Unlocking Insights in IoT-Based Patient Monitoring: Methods for Encompassing Large-Data Challenges.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {15}, pages = {}, pmid = {37571543}, issn = {1424-8220}, support = {2020-0-00833//This work was supported by the Institute of Information & communications Technology Planning & Evaluation (IITP) grant funded by the South Korea government (MSIT)/ ; }, mesh = {Humans ; *Internet of Things ; Data Collection ; Data Visualization ; Health Personnel ; Monitoring, Physiologic ; }, abstract = {The remote monitoring of patients using the internet of things (IoT) is essential for ensuring continuous observation, improving healthcare, and decreasing the associated costs (i.e., reducing hospital admissions and emergency visits). There has been much emphasis on developing methods and approaches for remote patient monitoring using IoT. Most existing frameworks cover parts or sub-parts of the overall system but fail to provide a detailed and well-integrated model that covers different layers. Leveraging remote monitoring tools and coupling them with health services requires an architecture that handles data flow and enables significant interventions. This paper proposes a cloud-based patient monitoring model that enables IoT-generated data collection, storage, processing, and visualization. The system has three main parts: sensing (IoT-enabled data collection), network (processing functions and storage), and application (interface for health workers and caretakers). To handle the large volume of IoT data, the sensing module employs filtering and variable sampling. This pre-processing helps reduce the data received from IoT devices and enables the observation of four times more patients compared to not using edge processing. We also discuss the flow of data and processing, thus enabling the deployment of data visualization services and intelligent applications.}, } @article {pmid37571451, year = {2023}, author = {Saeed, S and Altamimi, SA and Alkayyal, NA and Alshehri, E and Alabbad, DA}, title = {Digital Transformation and Cybersecurity Challenges for Businesses Resilience: Issues and Recommendations.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {15}, pages = {}, pmid = {37571451}, issn = {1424-8220}, support = {000//Saudi Aramco Cybersecurity Chair, Imam Abdulrahman Bin Faisal University/ ; }, abstract = {This systematic literature review explores the digital transformation (DT) and cybersecurity implications for achieving business resilience. DT involves transitioning organizational processes to IT solutions, which can result in significant changes across various aspects of an organization. However, emerging technologies such as artificial intelligence, big data and analytics, blockchain, and cloud computing drive digital transformation worldwide while increasing cybersecurity risks for businesses undergoing this process.
This literature survey article highlights the importance of comprehensive knowledge of cybersecurity threats during DT implementation, to prevent interruptions caused by malicious activities or by unauthorized access from attackers seeking to alter or destroy sensitive information or to extort users. Cybersecurity is essential to DT as it protects digital assets from cyber threats. We conducted a systematic literature review using the PRISMA methodology in this research. Our literature review found that DT has increased efficiency and productivity but poses new challenges related to cybersecurity risks, such as data breaches and cyber-attacks. We conclude by discussing future vulnerabilities associated with DT implementation and provide recommendations on how organizations can mitigate these risks through effective cybersecurity measures. The paper recommends a staged cybersecurity readiness framework for business organizations to be prepared to pursue digital transformation.}, } @article {pmid37566992, year = {2023}, author = {Abdul-Rahman, T and Ghosh, S and Lukman, L and Bamigbade, GB and Oladipo, OV and Amarachi, OR and Olanrewaju, OF and Toluwalashe, S and Awuah, WA and Aborode, AT and Lizano-Jubert, I and Audah, KA and Teslyk, TP}, title = {Inaccessibility and low maintenance of medical data archive in low-middle income countries: Mystery behind public health statistics and measures.}, journal = {Journal of infection and public health}, volume = {16}, number = {10}, pages = {1556-1561}, doi = {10.1016/j.jiph.2023.07.001}, pmid = {37566992}, issn = {1876-035X}, mesh = {Humans ; *Developing Countries ; *Public Health ; Retrospective Studies ; Africa ; }, abstract = {INTRODUCTION: Africa bears the largest burden of communicable and non-communicable diseases globally, yet it contributes only about 1 % of global research output, partly because of inaccessibility and low maintenance of medical data. Data is widely recognized as a crucial tool for improvement of population health. Despite the introduction of electronic health data systems in low- and middle-income countries (LMICs) to improve data quality, some LMICs still lack an efficient system to collect and archive data. This study aims to examine the underlying causes of data archive inaccessibility and poor maintenance in LMICs, and to highlight sustainable mitigation measures.

METHOD: The authors conducted a comprehensive search of PubMed, Google Scholar, and organization websites using the search string "data archive" OR "medical data" OR "public health statistics" AND "challenges" AND "maintenance" AND "Low Middle Income Countries" OR "LMIC" to identify relevant studies and reports for inclusion in our review. All articles related to data archives in low- and middle-income countries were considered without restriction, owing to the scarcity of data.

RESULT: Medical data archives in LMICs face challenges that impact data quality. Insufficient training, organizational constraints, and limited infrastructure hinder archive maintenance. Improvement will require support for public datasets, digital literacy, and technology infrastructure. Standardization, cloud solutions, and advanced technologies can enhance data management, while capacity building and training programs are crucial.

CONCLUSION: The creation and maintenance of data archives to facilitate the storage of retrospective datasets is critical for producing reliable and consistent data that support the development of resilient health systems and disease surveillance in LMICs.}, } @article {pmid37566590, year = {2023}, author = {H S, M and T, SK and Gupta, P and McArdle, G}, title = {A Harris Hawk Optimisation system for energy and resource efficient virtual machine placement in cloud data centers.}, journal = {PloS one}, volume = {18}, number = {8}, pages = {e0289156}, pmid = {37566590}, issn = {1932-6203}, mesh = {Animals ; *Algorithms ; Cloud Computing ; Computer Simulation ; Workload ; *Falconiformes ; }, abstract = {Virtualisation is a major technology in cloud computing for optimising the cloud data centre's power usage. In the current scenario, most of the services are migrated to the cloud, putting more load on the cloud data centres. As a result, the data center's size expands, resulting in increased energy usage. To address this problem, a resource allocation optimisation method that is both efficient and effective is necessary. The optimal utilisation of cloud infrastructure and optimisation algorithms plays a vital role. Efficient use of cloud resources depends on the virtual machine allocation policy. A virtual machine placement technique based on the Harris Hawk Optimisation (HHO) model for the cloud data centre is presented in this paper. The proposed HHO model aims to find the best place for virtual machines on suitable hosts with the least load and power consumption. PlanetLab's real-time workload traces are used for performance evaluation against existing PSO (Particle Swarm Optimisation) and PABFD (Power-Aware Best Fit Decreasing) methods. The performance evaluation of the proposed method is done using power consumption, SLA, CPU utilisation, RAM utilisation, execution time (ms) and the number of VM migrations. The evaluation uses two simulation scenarios, scaling the workload in scenario 1 and increasing the resources allocated to virtual machines in scenario 2, to study performance under underloaded and overloaded conditions. Experimental results show that the proposed HHO algorithm improved execution time (ms) by 4%, reduced power consumption by 27%, reduced SLA violations by 16% and increased resource utilisation by 17%.
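Metaheuristics such as the HHO placement search above minimise an objective that combines load, power, and SLA terms. The following toy fitness function is purely illustrative of that kind of objective (a sketch, not the paper's exact formulation; the power model and penalty weight are assumptions).

```python
def placement_fitness(placement, vm_cpu, host_cap,
                      p_idle=100.0, p_max=250.0, sla_penalty=1000.0):
    """Toy VM-placement objective: total power of active hosts (linear in
    utilisation) plus a penalty per overloaded host. Illustrative only."""
    load = [0.0] * len(host_cap)
    for vm, host in enumerate(placement):   # placement[i] = host index of VM i
        load[host] += vm_cpu[vm]
    cost = 0.0
    for h, cap in enumerate(host_cap):
        util = load[h] / cap
        if util > 0:                        # idle hosts assumed switched off
            cost += p_idle + (p_max - p_idle) * min(util, 1.0)
        if util > 1.0:                      # overload -> likely SLA violation
            cost += sla_penalty * (util - 1.0)
    return cost

# Two hosts, three VMs: the optimiser would search over such assignments.
print(placement_fitness([0, 0, 1], vm_cpu=[30, 40, 20], host_cap=[100, 100]))
```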
The HHO algorithm is also effective in handling dynamic and uncertain environments, making it suitable for real-world cloud infrastructures.}, } @article {pmid37564538, year = {2023}, author = {Intelligence And Neuroscience, C}, title = {Retracted: Cloud Computing to Tourism Economic Data Scheduling Algorithm under the Background of Image and Video.}, journal = {Computational intelligence and neuroscience}, volume = {2023}, number = {}, pages = {9815205}, pmid = {37564538}, issn = {1687-5273}, abstract = {[This retracts the article DOI: 10.1155/2022/3948221.].}, } @article {pmid37564503, year = {2023}, author = {Intelligence And Neuroscience, C}, title = {Retracted: Construction of Economic Security Early Warning System Based on Cloud Computing and Data Mining.}, journal = {Computational intelligence and neuroscience}, volume = {2023}, number = {}, pages = {9831835}, pmid = {37564503}, issn = {1687-5273}, abstract = {[This retracts the article DOI: 10.1155/2022/2080840.].}, } @article {pmid37564485, year = {2023}, author = {Intelligence And Neuroscience, C}, title = {Retracted: Public Security Video Image Detection System Construction Platform in Cloud Computing Environment.}, journal = {Computational intelligence and neuroscience}, volume = {2023}, number = {}, pages = {9873483}, pmid = {37564485}, issn = {1687-5273}, abstract = {[This retracts the article DOI: 10.1155/2022/4113803.].}, } @article {pmid37560357, year = {2023}, author = {Ouyang, W and Eliceiri, KW and Cimini, BA}, title = {Moving beyond the desktop: prospects for practical bioimage analysis via the web.}, journal = {Frontiers in bioinformatics}, volume = {3}, number = {}, pages = {1233748}, pmid = {37560357}, issn = {2673-7647}, support = {P41 GM135019/GM/NIGMS NIH HHS/United States ; }, abstract = {As biological imaging continues to rapidly advance, it results in increasingly complex image data, necessitating a reevaluation of conventional bioimage analysis methods and their accessibility. This perspective underscores our belief that a transition from desktop-based tools to web-based bioimage analysis could unlock immense opportunities for improved accessibility, enhanced collaboration, and streamlined workflows. We outline the potential benefits, such as reduced local computational demands and solutions to common challenges, including software installation issues and limited reproducibility. Furthermore, we explore the present state of web-based tools, hurdles in implementation, and the significance of collective involvement from the scientific community in driving this transition. In acknowledging the potential roadblocks and complexity of data management, we suggest a combined approach of selective prototyping and large-scale workflow application for optimal usage. 
Embracing web-based bioimage analysis could pave the way for the life sciences community to accelerate biological research, offering a robust platform for a more collaborative, efficient, and democratized science.}, } @article {pmid37556340, year = {2023}, author = {Liu, X and Zhao, X and Xia, Z and Feng, Q and Yu, P and Weng, J}, title = {Secure Outsourced SIFT: Accurate and Efficient Privacy-Preserving Image SIFT Feature Extraction.}, journal = {IEEE transactions on image processing : a publication of the IEEE Signal Processing Society}, volume = {32}, number = {}, pages = {4635-4648}, doi = {10.1109/TIP.2023.3295741}, pmid = {37556340}, issn = {1941-0042}, abstract = {Cloud computing has become an important IT infrastructure in the big data era; more and more users are motivated to outsource the storage and computation tasks to the cloud server for convenient services. However, privacy has become the biggest concern, and tasks are expected to be processed in a privacy-preserving manner. This paper proposes a secure SIFT feature extraction scheme with better integrity, accuracy and efficiency than the existing methods. SIFT includes many complex steps, including the construction of the DoG scale space, extremum detection, extremum location adjustment, rejection of extremum points with low contrast, elimination of the edge response, orientation assignment, and descriptor generation. These complex steps need to be disassembled into elementary operations such as addition, multiplication, and comparison for secure implementation. We adopt a series of secret-sharing protocols for better accuracy and efficiency. In addition, we design a secure absolute value comparison protocol to support absolute value comparison operations in the secure SIFT feature extraction. The SIFT feature extraction steps are completely implemented in the ciphertext domain, and the communications between the clouds are appropriately packed to reduce the number of communication rounds. We carefully analyzed the accuracy and efficiency of our scheme. The experimental results show that our scheme outperforms the existing state-of-the-art.}, } @article {pmid37554555, year = {2023}, author = {Liu, X and Li, X and Gao, L and Zhang, J and Qin, D and Wang, K and Li, Z}, title = {Early-season and refined mapping of winter wheat based on phenology algorithms - a case of Shandong, China.}, journal = {Frontiers in plant science}, volume = {14}, number = {}, pages = {1016890}, pmid = {37554555}, issn = {1664-462X}, abstract = {Winter wheat is one of the major food crops in China, and timely and effective early-season identification of winter wheat is crucial for crop yield estimation and food security. However, traditional winter wheat mapping is based on post-season identification, which has a lag and relies heavily on sample data. Early-season identification of winter wheat faces the main difficulties of weak remote sensing response of the vegetation signal at the early growth stage, difficulty of acquiring sample data on winter wheat in the current season in real time, interference of crops in the same period, and limited image resolution. In this study, an early-season refined mapping method with winter wheat phenology information as a priori knowledge is developed based on the Google Earth Engine cloud platform by using Sentinel-2 time series data as the main data source; the method is automated and highly interpretable.
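The secure SIFT entry above (pmid37556340) disassembles the pipeline into elementary operations computed over secret shares held by two clouds. A minimal sketch of two-party additive secret sharing over a prime field follows; the field size and helper names are illustrative, and the paper's actual protocols are far more involved.

```python
import secrets

P = 2**61 - 1  # a Mersenne prime defining the share field (illustrative choice)

def share(x):
    """Split x into two additive shares: x = s0 + s1 (mod P)."""
    s0 = secrets.randbelow(P)
    return s0, (x - s0) % P

def reconstruct(s0, s1):
    return (s0 + s1) % P

# Each cloud adds its shares locally; no single share reveals the inputs.
a0, a1 = share(123)
b0, b1 = share(456)
c0, c1 = (a0 + b0) % P, (a1 + b1) % P   # share-wise addition, no communication
assert reconstruct(c0, c1) == 579
```

Addition is "free" in this model; multiplication and comparison (as in the paper's absolute value comparison protocol) require interactive sub-protocols, which is why packing communication rounds matters.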
The normalized differential phenology index (NDPI) is adopted to enhance the weak vegetation signal at the early growth stage of winter wheat, and two winter wheat phenology feature enhancement indices based on NDPI, namely the wheat phenology differential index (WPDI) and the normalized differential wheat phenology index (NDWPI), are developed. To address the issue of "different objects with the same spectral characteristics" between winter wheat and garlic, a plastic mulched index (PMI) is established through quantitative spectral analysis based on the differences in early planting patterns between winter wheat and garlic. The identification accuracy of the method is 82.64% and 88.76% in the early overwintering and regreening periods, respectively. These results were consistent with official statistics (R2 = 0.96 and 0.98, respectively). Generalization analysis demonstrated the spatiotemporal transferability of the method across different years and regions. In conclusion, the proposed methodology can obtain highly precise spatial distribution and planting area information of winter wheat 4-6 months before harvest. It provides theoretical and methodological guidance for early crop identification and has good scientific research and application value.}, } @article {pmid37549337, year = {2023}, author = {Wei, L and Xu, M and Liu, Z and Jiang, C and Lin, X and Hu, Y and Wen, X and Zou, R and Peng, C and Lin, H and Wang, G and Yang, L and Fang, L and Yang, M and Zhang, P}, title = {Hit Identification Driven by Combining Artificial Intelligence and Computational Chemistry Methods: A PI5P4K-β Case Study.}, journal = {Journal of chemical information and modeling}, volume = {63}, number = {16}, pages = {5341-5355}, doi = {10.1021/acs.jcim.3c00543}, pmid = {37549337}, issn = {1549-960X}, mesh = {*Artificial Intelligence ; *Computational Chemistry ; Drug Design ; Drug Discovery/methods ; }, abstract = {Computer-aided drug design (CADD), especially artificial intelligence-driven drug design (AIDD), is increasingly used in drug discovery. In this paper, a novel and efficient workflow for hit identification was developed within the ID4Inno drug discovery platform, featuring innovative artificial intelligence, high-accuracy computational chemistry, and high-performance cloud computing. The workflow was validated by discovering a few potent hit compounds (best IC50 is ∼0.80 μM) against PI5P4K-β, a novel anti-cancer target. Furthermore, by applying the tools implemented in ID4Inno, we managed to optimize these hit compounds and finally obtained five hit series with different scaffolds, all of which showed high activity against PI5P4K-β. These results demonstrate the effectiveness of ID4Inno in driving hit identification based on artificial intelligence, computational chemistry, and cloud computing.}, } @article {pmid37549000, year = {2023}, author = {Guan, V and Zhou, C and Wan, H and Zhou, R and Zhang, D and Zhang, S and Yang, W and Voutharoja, BP and Wang, L and Win, KT and Wang, P}, title = {A Novel Mobile App for Personalized Dietary Advice Leveraging Persuasive Technology, Computer Vision, and Cloud Computing: Development and Usability Study.}, journal = {JMIR formative research}, volume = {7}, number = {}, pages = {e46839}, pmid = {37549000}, issn = {2561-326X}, abstract = {BACKGROUND: The Australian Dietary Guidelines (ADG) translate the best available evidence in nutrition into food choice recommendations. However, adherence to the ADG is poor in Australia.
Given that following a healthy diet can be a potentially cost-effective strategy for lowering the risk of chronic diseases, there is an urgent need to develop novel technologies for individuals to improve their adherence to the ADG.

OBJECTIVE: This study describes the development process and design of a prototype mobile app for personalized dietary advice based on the ADG for adults in Australia, with the aim of exploring the usability of the prototype. The goal of the prototype was to provide personalized, evidence-based support for self-managing food choices in real time.

METHODS: The guidelines of the design science paradigm were applied to guide the design, development, and evaluation of a progressive web app using Amazon Web Services Elastic Compute Cloud services via iterations. The food layer of the Nutrition Care Process, the strategies of cognitive behavioral theory, and the ADG were translated into prototype features guided by the Persuasive Systems Design model. A gain-framed approach was adopted to promote positive behavior changes. A cross-modal image-to-recipe retrieval model under an Apache 2.0 license was deployed for dietary assessment. A survey using the Mobile Application Rating Scale and semistructured in-depth interviews were conducted to explore the usability of the prototype through convenience sampling (N=15).

RESULTS: The prominent features of the prototype included the use of image-based dietary assessment, food choice tracking with immediate feedback leveraging gamification principles, personal goal setting for food choices, and the provision of recipe ideas and information on the ADG. The overall prototype quality score was "acceptable," with a median of 3.46 (IQR 2.78-3.81) out of 5 points. The median score of the perceived impact of the prototype on healthy eating based on the ADG was 3.83 (IQR 2.75-4.08) out of 5 points. In-depth interviews identified the use of gamification for tracking food choices and innovation in the image-based dietary assessment as the main drivers of the positive user experience of using the prototype.

CONCLUSIONS: A novel evidence-based prototype mobile app was successfully developed by leveraging a cross-disciplinary collaboration. A detailed description of the development process and design of the prototype enhances its transparency and provides detailed insights into its creation. This study provides a valuable example of the development of a novel, evidence-based app for personalized dietary advice on food choices using recent advancements in computer vision. A revised version of this prototype is currently under development.}, } @article {pmid37541856, year = {2023}, author = {Xi, N and Liu, J and Li, Y and Qin, B}, title = {Decentralized access control for secure microservices cooperation with blockchain.}, journal = {ISA transactions}, volume = {141}, number = {}, pages = {44-51}, doi = {10.1016/j.isatra.2023.07.018}, pmid = {37541856}, issn = {1879-2022}, abstract = {With the rapid advancement of cloud-native computing, microservices with high concurrency and low coupling have ushered in an unprecedented period of vigorous development. However, due to the mutability and complexity of cooperation procedures, it is difficult to realize highly efficient security management on these microservices. Traditional centralized access control has the defects of relying on a centralized cloud manager and a single point of failure. Meanwhile, decentralized mechanisms are undermined by inconsistent policies defined by different participants. This paper first proposes blockchain-based distributed access control policies and a scheme, especially for microservices cooperation with dynamic access policies. We store the authorized security policies on the blockchain to solve the inconsistent policy problem while enabling individual management of personalized access policies by the providers rather than a central authority. Then we propose a graph-based decision-making scheme to achieve efficient access control for microservices cooperation. Through the evaluations and experiments, we show that our solution can realize effective distributed access control at an affordable cost.}, } @article {pmid37540975, year = {2023}, author = {Faber, DA and Hinman, JM and Knauer, EM and Hechenbleikner, EM and Badell, IR and Lin, E and Srinivasan, JK and Chahine, AA and Papandria, DJ}, title = {Implementation of an Online Intraoperative Assessment of Technical Performance for Surgical Trainees.}, journal = {The Journal of surgical research}, volume = {291}, number = {}, pages = {574-585}, doi = {10.1016/j.jss.2023.07.008}, pmid = {37540975}, issn = {1095-8673}, mesh = {*Internship and Residency ; Clinical Competence ; Education, Medical, Graduate/methods ; Feedback ; Educational Measurement/methods ; *General Surgery/education ; }, abstract = {INTRODUCTION: Assessment of surgical resident technical performance is an integral component of any surgical training program. Timely assessment delivered in a structured format is a critical step to enhance technical skills, but residents often report that the quality and quantity of timely feedback received is lacking. Moreover, the absence of specific written feedback can allow residents to seemingly progress in their operative milestones as junior residents, but struggle as they advance into postgraduate year 3 and above. We therefore designed and implemented a web-based intraoperative assessment tool and corresponding summary "dashboard" to facilitate real-time assessment and documentation of technical performance.

MATERIALS AND METHODS: A web form was designed leveraging a cloud computing platform and implementing a modified Ottawa Surgical Competency Operating Room Evaluation instrument; this included additional, procedure-specific criteria for select operations. A link to this was provided to residents via email and to all surgical faculty as a Quick Response code. Residents open and complete a portion of the form on a smartphone, then relinquish the device to an attending surgeon who then completes and submits the assessment. The data are then transferred to a secure web-based reporting interface; each resident (together with a faculty advisor) can then access and review all completed assessments.

RESULTS: The Assessment form was activated in June 2021 and formally introduced to all residents in July 2021, with residents required to complete at least one assessment per month. Residents with less predictable access to operative procedures (night float or Intensive Care Unit) were exempted from the requirement during those months. To date, a total of 559 assessments have been completed for operations performed by 56 trainees, supervised by 122 surgical faculty and senior trainees. The mean number of procedures assessed per resident was 10.0 and the mean number per assessor was 4.6. Resident initiation of Intraoperative Assessments has increased since the tool was introduced, and scores for technical and nontechnical performance reliably differentiate residents by seniority.
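As a quick sanity check, the reported means follow directly from the reported totals, assuming they are simple ratios:

```python
# Consistency check of the means reported above (totals taken from the text).
assessments, trainees, assessors = 559, 56, 122
print(round(assessments / trainees, 1))   # 10.0 -> matches mean per resident
print(round(assessments / assessors, 1))  # 4.6  -> matches mean per assessor
```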

CONCLUSIONS: This novel system demonstrates that an online, resident-initiated technical assessment tool is feasible to implement and scale. This model's requirement that the attending enter performance ratings into the trainee's electronic device ensures that feedback is delivered directly to the trainee. Whether this aspect of our assessment ensures more direct and specific (and therefore potentially actionable) feedback is a focus for future study. Our use of commercial cloud computing services should permit cost-effective adoption of similar systems at other training programs.}, } @article {pmid37538659, year = {2023}, author = {Intelligence And Neuroscience, C}, title = {Retracted: Optimization of Online Course Platform for Piano Preschool Education Based on Internet Cloud Computing System.}, journal = {Computational intelligence and neuroscience}, volume = {2023}, number = {}, pages = {9856831}, pmid = {37538659}, issn = {1687-5273}, abstract = {[This retracts the article DOI: 10.1155/2022/6525866.].}, } @article {pmid37538595, year = {2023}, author = {Intelligence And Neuroscience, C}, title = {Retracted: The Use of Internet of Things and Cloud Computing Technology in the Performance Appraisal Management of Innovation Capability of University Scientific Research Team.}, journal = {Computational intelligence and neuroscience}, volume = {2023}, number = {}, pages = {9806856}, pmid = {37538595}, issn = {1687-5273}, abstract = {[This retracts the article DOI: 10.1155/2022/9423718.].}, } @article {pmid37529586, year = {2023}, author = {Aja, D and Miyittah, M and Angnuureng, DB}, title = {Nonparametric assessment of mangrove ecosystem in the context of coastal resilience in Ghana.}, journal = {Ecology and evolution}, volume = {13}, number = {8}, pages = {e10388}, pmid = {37529586}, issn = {2045-7758}, abstract = {Cloud cover effects make it difficult to evaluate the mangrove ecosystem in tropical locations using solely optical satellite data. Therefore, it is essential to conduct a more precise evaluation using data from several sources and appropriate models in order to manage the mangrove ecosystem as effectively as feasible. In this study, the status of the mangrove ecosystem and its potential contribution to coastal resilience were evaluated using the Google Earth Engine (GEE) and the InVEST model. The GEE was used to map changes in mangrove and other land cover types for the years 2009 and 2019 by integrating both optical and radar data. The quantity allocation disagreement index (QADI) was used to assess the classification accuracy. Mangrove height and aboveground biomass density were estimated using GEE by extracting their values from radar image clipped with a digital elevation model and mangrove vector file. A universal allometric equation that relates canopy height to aboveground biomass was applied. The InVEST model was used to calculate a hazard index of every 250 m of the shoreline with and without mangrove ecosystem. Our result showed that about 16.9% and 21% of mangrove and other vegetation cover were lost between 2009 and 2019. However, water body and bare land/built-up areas increased by 7% and 45%, respectively. The overall accuracy of 2009 and 2019 classifications was 99.6% (QADI = 0.00794) and 99.1% (QADI = 0.00529), respectively. Mangrove height and aboveground biomass generally decreased from 12.7 to 6.3 m and from 105 to 88 Mg/ha on average. 
The vulnerability index showed that 23%, 51% and 26% of the coastal segments in the presence of mangroves fall under very low/low, moderate and high risk, respectively, whereas in the absence of mangroves, 8%, 38%, 39% and 15% fall under low, moderate, high and very high-risk zones, respectively. This study will, among other things, help stakeholders in coastal management and marine spatial planning to identify the need to focus on conservation practices.}, } @article {pmid37521954, year = {2023}, author = {Bommu, S and M, AK and Babburu, K and N, S and Thalluri, LN and G, VG and Gopalan, A and Mallapati, PK and Guha, K and Mohammad, HR and S, SK}, title = {Smart City IoT System Network Level Routing Analysis and Blockchain Security Based Implementation.}, journal = {Journal of electrical engineering & technology}, volume = {18}, number = {2}, pages = {1351-1368}, pmid = {37521954}, issn = {2093-7423}, abstract = {This paper demonstrates network-level performance analysis and implementation of a smart city Internet of Things (IoT) system with Infrastructure as a Service (IaaS) level cloud computing architecture. The smart city IoT network topology performance is analyzed at the simulation level using the NS3 simulator by extracting most of the performance-deciding parameters. The performance-enhanced smart city topology is practically implemented in IaaS level architecture. The intended smart city IoT system can monitor the principal parameters like video surveillance with a thermal camera (e.g., to identify people infected with viruses such as COVID-19), transport, water quality, solar radiation, sound pollution, air quality (O3, NO2, CO, Particles), parking zones, iconic places, E-suggestions, and PRO information over a low power wide area network in a 61.88 km × 61.88 km range. We have primarily addressed the IoT network-level routing and quality of service (QoS) challenges and the implementation-level security challenges. The simulation-level network topology analysis is performed to improve the routing and QoS. Blockchain technology-based decentralization is adopted to enrich the IoT system performance in terms of security.}, } @article {pmid37514843, year = {2023}, author = {Montiel-Caminos, J and Hernandez-Gonzalez, NG and Sosa, J and Montiel-Nelson, JA}, title = {Integer Arithmetic Algorithm for Fundamental Frequency Identification of Oceanic Currents.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {14}, pages = {}, pmid = {37514843}, issn = {1424-8220}, support = {PID2020-117251RB-C21//Ministerio de Ciencia e Innovación de España - Agencia Estatal de Investigación/ ; TED2021-131470B-I00//Ministerio de Ciencia e Innovación de España - Agencia Estatal de Investigación/ ; }, abstract = {Underwater sensor networks play a crucial role in collecting valuable data to monitor offshore aquaculture infrastructures. The number of deployed devices not only impacts the bandwidth for a highly constrained communication environment, but also the cost of the sensor network. On the other hand, industrial and literature current meters work as raw data loggers, and most of the calculations to determine the fundamental frequencies are performed offline on a desktop computer or in the cloud. Belonging to the edge computing research area, this paper presents an algorithm to extract the fundamental frequencies of water currents in an underwater sensor network deployed in offshore aquaculture infrastructures. The target sensor node is based on a commercial ultra-low-power microcontroller.
The proposed fundamental frequency identification algorithm only requires the use of an integer arithmetic unit. Our approach exploits the mathematical properties of finite impulse response (FIR) filtering in the integer domain. The design and implementation of the presented algorithm are discussed in detail in terms of FIR tuning/coefficient selection, memory usage and variable domain for its mathematical formulation, aimed at reducing the computational effort required. The approach is validated using a shallow water current model and real-world raw data from an offshore aquaculture infrastructure. The extracted frequencies have a maximum error below 4%.}, } @article {pmid37514672, year = {2023}, author = {Zhang, M and Chen, Y and Qian, C}, title = {Fooling Examples: Another Intriguing Property of Neural Networks.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {14}, pages = {}, pmid = {37514672}, issn = {1424-8220}, support = {6142111//Foundation of National Key Laboratory of Science and Technology on Information System Security/ ; }, abstract = {Neural networks have been proven to be vulnerable to adversarial examples; these are examples that can be recognized by both humans and neural networks, although neural networks give incorrect predictions. As an intriguing property of neural networks, adversarial examples pose a serious threat to the secure application of neural networks. In this article, we present another intriguing property of neural networks: the fact that well-trained models believe some examples to be recognizable objects (often with high confidence), while humans cannot recognize such examples. We refer to these as "fooling examples". Specifically, we take inspiration from the construction of adversarial examples and develop an iterative method for generating fooling examples. The experimental results show that fooling examples can not only be easily generated, with a success rate of nearly 100% in the white-box scenario, but also exhibit strong transferability across different models in the black-box scenario. Tests on the Google Cloud Vision API show that fooling examples can also be recognized by real-world computer vision systems. Our findings reveal a new cognitive deficit of neural networks, and we hope that these potential security threats will be addressed in future neural network applications.}, } @article {pmid37514557, year = {2023}, author = {Alzuhair, A and Alghaihab, A}, title = {The Design and Optimization of an Acoustic and Ambient Sensing AIoT Platform for Agricultural Applications.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {14}, pages = {}, pmid = {37514557}, issn = {1424-8220}, support = {IFKSUOR3-109-1//Ministry of Education, Saudi Arabia/ ; }, abstract = {The use of technology in agriculture has been gaining significant attention recently. By employing advanced tools and automation and leveraging the latest advancements in the Internet of Things (IoT) and artificial intelligence (AI), the agricultural sector is witnessing improvements in its crop yields and overall efficiency. This paper presents the design and performance analysis of a machine learning (ML) model for agricultural applications involving acoustic sensing. This model is integrated into an efficient Artificial Intelligence of Things (AIoT) platform tailored for agriculture.
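The integer-only FIR idea in the Montiel-Caminos entry above (pmid37514843) can be sketched in a few lines. Here coefficients are pre-scaled to Q15 fixed point and a right-shift replaces division; the function name and tap values are illustrative, not the paper's tuned filter.

```python
def fir_int(samples, coeffs_q15, shift=15):
    """Integer-only FIR: coefficients are pre-scaled by 2**15 (Q15), so the
    accumulator stays in the integer domain and a final arithmetic shift
    replaces floating-point division."""
    out = []
    for n in range(len(samples)):
        acc = 0
        for k, c in enumerate(coeffs_q15):
            if n - k >= 0:
                acc += c * samples[n - k]   # integer multiply-accumulate
        out.append(acc >> shift)            # scale back to sample units
    return out

# 4-tap moving average: 0.25 ~= 8192 / 2**15 in Q15 representation.
coeffs = [8192, 8192, 8192, 8192]
print(fir_int([0, 100, 200, 300, 400], coeffs))  # [0, 25, 75, 150, 250]
```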
The model is then used in the design of a communication network architecture and for determining the distribution of the computing load between edge devices and the cloud. The study focuses on the design, analysis, and optimization of AI deployment for reliable classification models in agricultural applications. Both the architectural level and the hardware implementation are taken into consideration when designing the radio module and computing unit. Additionally, the study encompasses the design and performance analysis of the hardware used to implement the sensor node specifically developed for sound classification in agricultural applications. The novelty of this work lies in the optimization of the integrated sensor node, which combines the proposed ML model and wireless network, resulting in an agriculture-specific AIoT platform. This co-design enables significant improvements in performance and efficiency for acoustic and ambient sensing applications.}, } @article {pmid37510005, year = {2023}, author = {Fu, M and Zhang, C and Hu, C and Wu, T and Dong, J and Zhu, L}, title = {Achieving Verifiable Decision Tree Prediction on Hybrid Blockchains.}, journal = {Entropy (Basel, Switzerland)}, volume = {25}, number = {7}, pages = {}, pmid = {37510005}, issn = {1099-4300}, support = {No. 62202051//National Natural Science Foundation of China/ ; Nos. 2021M700435, 2021TQ0042//China Postdoctoral Science Foundation/ ; Grant Nos. 2021YFB2700500 and 2021YFB2700503//National Key R&D Program of China/ ; No. 2022B1212010005//Guangdong Provincial Key Laboratory of Novel Security Intelligence Technologies/ ; }, abstract = {Machine learning has become increasingly popular in academic and industrial communities and has been widely implemented in various online applications due to its powerful ability to analyze and use data. Among all the machine learning models, decision tree models stand out due to their great interpretability and simplicity, and have been implemented in cloud computing services for various purposes. Despite this great success, the integrity of online decision tree prediction is a growing concern. The correctness and consistency of decision tree predictions in cloud computing systems need stronger security guarantees, since verifying the correctness of the model prediction remains challenging. Meanwhile, blockchain has a promising prospect in two-party machine learning services, as its immutable and traceable characteristics satisfy the verifiable settings in machine learning services. In this paper, we initiate the study of decision tree prediction services on blockchain systems and propose VDT, a Verifiable Decision Tree prediction scheme. Specifically, by leveraging the Merkle tree and hash function, the scheme allows the service provider to generate a verification proof to convince the client that the output of the decision tree prediction is correctly computed on a particular data sample. The scheme is further extended with an update method so that the verifiable decision tree model can be modified efficiently. We prove the security of the proposed VDT schemes and evaluate their performance using real datasets.
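The Merkle-tree machinery that VDT builds on can be illustrated compactly. A minimal membership-proof sketch with hashlib follows (generic Merkle verification under stated assumptions, not the paper's full prediction-proof protocol).

```python
import hashlib

def h(data: bytes) -> bytes:
    return hashlib.sha256(data).digest()

def merkle_root(leaves):
    """Build the root by pairwise hashing; odd levels duplicate the last node."""
    level = [h(leaf) for leaf in leaves]
    while len(level) > 1:
        if len(level) % 2:
            level.append(level[-1])
        level = [h(level[i] + level[i + 1]) for i in range(0, len(level), 2)]
    return level[0]

def verify(leaf, proof, root):
    """proof is a list of (sibling_hash, sibling_is_left) pairs up the tree."""
    node = h(leaf)
    for sibling, is_left in proof:
        node = h(sibling + node) if is_left else h(node + sibling)
    return node == root

leaves = [b"node0", b"node1", b"node2", b"node3"]
root = merkle_root(leaves)
# Prove leaf 2 is in the tree: sibling leaf 3, then the hash of leaves 0 and 1.
proof = [(h(b"node3"), False), (h(h(b"node0") + h(b"node1")), True)]
assert verify(b"node2", proof, root)
```

In a prediction-proof setting, the leaves would commit to decision tree nodes, so the proof size grows only logarithmically with the model, which is consistent with the sub-second proof generation reported next.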
Experimental evaluations show that our scheme requires less than one second to produce a verifiable proof.}, } @article {pmid37509978, year = {2023}, author = {Ye, C and Tan, S and Wang, Z and Shi, B and Shi, L}, title = {Hybridized Hierarchical Watermarking and Selective Encryption for Social Image Security.}, journal = {Entropy (Basel, Switzerland)}, volume = {25}, number = {7}, pages = {}, pmid = {37509978}, issn = {1099-4300}, support = {61502154//National Natural Science Foundation of China/ ; }, abstract = {With the advent of cloud computing and social multimedia communication, more and more social images are being collected on social media platforms, such as Facebook, TikTok, Flirk, and YouTube. The amount of social images produced and disseminated is rapidly increasing. Meanwhile, cloud computing-assisted social media platforms have made social image dissemination more and more efficient. There exists an unstoppable trend of fake/unauthorized social image dissemination. The growth of social image sharing underscores potential security risks for illegal use, such as image forgery, malicious copying, piracy exposure, plagiarism, and misappropriation. Therefore, secure social image dissemination has become urgent and critical on social media platforms. The authors propose a secure scheme for social image dissemination on social media platforms. The main objective is to construct a mapping between the tree structure Haar (TSH) transform and the hierarchical community structure of a social network. First, the TSH transform is performed on a social image using social network analysis (SNA). Second, all users in a social media platform are coded using SNA. Third, watermarking and encryption are performed in a compressed domain for protecting social image dissemination. Finally, the encrypted and watermarked contents are delivered to users via a hybrid multicast-unicast scheme. The use of encryption along with watermarking can provide double protection for social image dissemination. The theory analysis and experimental results demonstrate the effectiveness of the proposed scheme.}, } @article {pmid37503119, year = {2023}, author = {Hitz, BC and Lee, JW and Jolanki, O and Kagda, MS and Graham, K and Sud, P and Gabdank, I and Strattan, JS and Sloan, CA and Dreszer, T and Rowe, LD and Podduturi, NR and Malladi, VS and Chan, ET and Davidson, JM and Ho, M and Miyasato, S and Simison, M and Tanaka, F and Luo, Y and Whaling, I and Hong, EL and Lee, BT and Sandstrom, R and Rynes, E and Nelson, J and Nishida, A and Ingersoll, A and Buckley, M and Frerker, M and Kim, DS and Boley, N and Trout, D and Dobin, A and Rahmanian, S and Wyman, D and Balderrama-Gutierrez, G and Reese, F and Durand, NC and Dudchenko, O and Weisz, D and Rao, SSP and Blackburn, A and Gkountaroulis, D and Sadr, M and Olshansky, M and Eliaz, Y and Nguyen, D and Bochkov, I and Shamim, MS and Mahajan, R and Aiden, E and Gingeras, T and Heath, S and Hirst, M and Kent, WJ and Kundaje, A and Mortazavi, A and Wold, B and Cherry, JM}, title = {The ENCODE Uniform Analysis Pipelines.}, journal = {Research square}, volume = {}, number = {}, pages = {}, pmid = {37503119}, issn = {2693-5015}, support = {R01 HG009318/HG/NHGRI NIH HHS/United States ; U24 HG009397/HG/NHGRI NIH HHS/United States ; }, abstract = {The Encyclopedia of DNA elements (ENCODE) project is a collaborative effort to create a comprehensive catalog of functional elements in the human genome.
The current database comprises more than 19000 functional genomics experiments across more than 1000 cell lines and tissues using a wide array of experimental techniques to study the chromatin structure, regulatory and transcriptional landscape of the Homo sapiens and Mus musculus genomes. All experimental data, metadata, and associated computational analyses created by the ENCODE consortium are submitted to the Data Coordination Center (DCC) for validation, tracking, storage, and distribution to community resources and the scientific community. The ENCODE project has engineered and distributed uniform processing pipelines in order to promote data provenance and reproducibility as well as allow interoperability between genomic resources and other consortia. All data files, reference genome versions, software versions, and parameters used by the pipelines are captured and available via the ENCODE Portal. The pipeline code, developed using Docker and Workflow Description Language (WDL; https://openwdl.org/), is publicly available on GitHub, with images available on Dockerhub (https://hub.docker.com), enabling access to a diverse range of biomedical researchers. ENCODE pipelines maintained and used by the DCC can be installed to run on personal computers, local HPC clusters, or in cloud computing environments via Cromwell. Access to the pipelines and data via the cloud allows small labs the ability to use the data or software without access to institutional compute clusters. Standardization of the computational methodologies for analysis and quality control leads to comparable results from different ENCODE collections - a prerequisite for successful integrative analyses.}, } @article {pmid37501454, year = {2023}, author = {Swathi, HY and Shivakumar, G}, title = {Audio-visual multi-modality driven hybrid feature learning model for crowd analysis and classification.}, journal = {Mathematical biosciences and engineering : MBE}, volume = {20}, number = {7}, pages = {12529-12561}, doi = {10.3934/mbe.2023558}, pmid = {37501454}, issn = {1551-0018}, abstract = {The rapid emergence of advanced software systems, low-cost hardware, and decentralized cloud computing technologies has broadened the horizon for vision-based surveillance, monitoring and control. However, complex and inferior feature learning over visual artefacts or video streams, especially under extreme conditions, limits the majority of existing vision-based crowd analysis and classification systems. Retrieving event-sensitive or crowd-type-sensitive spatio-temporal features for the different crowd types under extreme conditions is a highly complex task. Consequently, it results in lower accuracy and hence low reliability, which constrains existing methods for real-time crowd analysis. Despite numerous efforts in vision-based approaches, the lack of acoustic cues often creates ambiguity in crowd classification. On the other hand, the strategic amalgamation of audio-visual features can enable accurate and reliable crowd analysis and classification. Motivated by this, in this research a novel audio-visual multi-modality-driven hybrid feature learning model is developed for crowd analysis and classification. In this work, a hybrid feature extraction model was applied to extract deep spatio-temporal features by using Gray-Level Co-occurrence Metrics (GLCM) and an AlexNet transfer learning model.
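The GLCM half of the hybrid feature extractor above is available off the shelf. A minimal scikit-image sketch follows; the random frame, distances, and property list are illustrative choices, and the AlexNet branch and fusion step are omitted.

```python
import numpy as np
from skimage.feature import graycomatrix, graycoprops  # scikit-image >= 0.19

# Stand-in for one grayscale video frame.
frame = (np.random.rand(64, 64) * 255).astype(np.uint8)

# Co-occurrence matrix at distance 1 for four orientations.
glcm = graycomatrix(frame, distances=[1],
                    angles=[0, np.pi/4, np.pi/2, 3*np.pi/4],
                    levels=256, symmetric=True, normed=True)

props = ["contrast", "dissimilarity", "homogeneity", "energy", "correlation"]
glcm_features = np.hstack([graycoprops(glcm, p).ravel() for p in props])
print(glcm_features.shape)  # 5 properties x 4 angles = (20,)
```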
After extracting the different GLCM features and AlexNet deep features, horizontal concatenation was performed to fuse the feature sets. Similarly, for acoustic feature extraction, the audio samples (from the input video) were processed for static (fixed size) sampling, pre-emphasis, block framing and Hann windowing, followed by acoustic feature extraction like GTCC, GTCC-Delta, GTCC-Delta-Delta, MFCC, Spectral Entropy, Spectral Flux, Spectral Slope and Harmonics to Noise Ratio (HNR). Finally, the extracted audio-visual features were fused to yield a composite multi-modal feature set, which is processed for classification using the random forest ensemble classifier. The multi-class classification yields a crowd-classification accuracy of 98.26%, precision of 98.89%, sensitivity of 94.82%, specificity of 95.57%, and an F-measure of 98.84%. The robustness of the proposed multi-modality-based crowd analysis model confirms its suitability for real-world crowd detection and classification tasks.}, } @article {pmid37491843, year = {2023}, author = {Kong, HJ}, title = {Classification of dental implant systems using cloud-based deep learning algorithm: an experimental study.}, journal = {Journal of Yeungnam medical science}, volume = {40}, number = {Suppl}, pages = {S29-S36}, pmid = {37491843}, issn = {2799-8010}, abstract = {BACKGROUND: This study aimed to evaluate the accuracy and clinical usability of implant system classification using automated machine learning on a Google Cloud platform.

METHODS: Four dental implant systems were selected: Osstem TSIII, Osstem USII, Biomet 3i Osseotite External, and Dentsply Sirona Xive. A total of 4,800 periapical radiographs (1,200 for each implant system) were collected and labeled based on electronic medical records. Regions of interest were manually cropped to 400×800 pixels, and all images were uploaded to Google Cloud storage. Approximately 80% of the images were used for training, 10% for validation, and 10% for testing. Google automated machine learning (AutoML) Vision automatically executed a neural architecture search technology to apply an appropriate algorithm to the uploaded data. A single-label image classification model was trained using AutoML. The performance of the model was evaluated in terms of accuracy, precision, recall, specificity, and F1 score.

RESULTS: The accuracy, precision, recall, specificity, and F1 score of the AutoML Vision model were 0.981, 0.963, 0.961, 0.985, and 0.962, respectively. Osstem TSIII had an accuracy of 100%. Osstem USII and 3i Osseotite External were most often confused in the confusion matrix.
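Metrics like those reported above can be recomputed from raw predictions. A minimal scikit-learn sketch with made-up labels (not the study's data) is shown below.

```python
from sklearn.metrics import (accuracy_score, confusion_matrix,
                             precision_recall_fscore_support)

# Illustrative labels for a 4-class implant task.
y_true = ["TSIII", "USII", "Osseotite", "Xive", "USII", "Osseotite"]
y_pred = ["TSIII", "USII", "USII",      "Xive", "USII", "Osseotite"]

print(accuracy_score(y_true, y_pred))
precision, recall, f1, _ = precision_recall_fscore_support(
    y_true, y_pred, average="macro", zero_division=0)
print(precision, recall, f1)
print(confusion_matrix(y_true, y_pred))  # rows: true class, cols: predicted
```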

CONCLUSION: Deep learning-based AutoML on a cloud platform, using a fine-tuned convolutional neural network, showed high accuracy in the classification of dental implant systems. Higher-quality images from various implant systems will be required to improve the performance and clinical usability of the model.}, } @article {pmid37486501, year = {2023}, author = {Didachos, C and Kintos, DP and Fousteris, M and Mylonas, P and Kanavos, A}, title = {An Optimized Cloud Computing Method for Extracting Molecular Descriptors.}, journal = {Advances in experimental medicine and biology}, volume = {1424}, number = {}, pages = {247-254}, pmid = {37486501}, issn = {0065-2598}, mesh = {*Cloud Computing ; *Algorithms ; }, abstract = {Extracting molecular descriptors from chemical compounds is an essential preprocessing phase for developing accurate classification models. Supervised machine learning algorithms offer the capability to detect "hidden" patterns that may exist in a large dataset of compounds, which are represented by their molecular descriptors. Assuming that molecules with similar structure tend to share similar physicochemical properties, large chemical libraries can be screened by applying similarity sourcing techniques in order to detect potential bioactive compounds against a molecular target. However, the process of generating these compound features is time-consuming. Our proposed methodology not only employs cloud computing to accelerate the process of extracting molecular descriptors but also introduces an optimized approach to utilize the computational resources in the most efficient way.}, } @article {pmid37475814, year = {2023}, author = {International, BR}, title = {Retracted: Medical Big Data and Postoperative Nursing of Fracture Patients Based on Cloud Computing.}, journal = {BioMed research international}, volume = {2023}, number = {}, pages = {9768264}, pmid = {37475814}, issn = {2314-6141}, abstract = {[This retracts the article DOI: 10.1155/2022/4090235.].}, } @article {pmid37469244, year = {2023}, author = {Pant, A and Miri, N and Bhagroo, S and Mathews, JA and Nazareth, DP}, title = {Monitor unit verification for Varian TrueBeam VMAT plans using Monte Carlo calculations and phase space data.}, journal = {Journal of applied clinical medical physics}, volume = {24}, number = {10}, pages = {e14063}, pmid = {37469244}, issn = {1526-9914}, mesh = {Humans ; *Radiotherapy, Intensity-Modulated/methods ; Computer Simulation ; Software ; Particle Accelerators ; Radiotherapy Dosage ; Monte Carlo Method ; Radiotherapy Planning, Computer-Assisted/methods ; }, abstract = {To use open-source Monte Carlo (MC) calculations for TPS monitor unit verification of VMAT plans delivered with the Varian TrueBeam linear accelerator, and to compare the results with a commercial software product, following the guidelines set in AAPM Task Group 219. The TrueBeam is modeled in EGSnrc using the Varian-provided phase-space files. Thirteen VMAT TrueBeam treatment plans representing various anatomical regions were evaluated, comprising 37 treatment arcs. VMAT plan simulations were performed on a computing cluster, using 10[7] -10[9] particle histories per arc. Point dose differences at five reference points per arc were compared between Eclipse, MC, and the commercial software, MUCheck. MC simulation with 5 × 10[7] histories per arc offered good agreement with Eclipse and a reasonable average calculation time of 9-18 min per full plan.
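The per-point bookkeeping behind such a TG-219-style comparison is straightforward. A minimal sketch with illustrative dose values; only the 5% action limit is taken from the text.

```python
def percent_diff(d_check, d_tps):
    """Per-point dose difference of the secondary check vs. the TPS, in %."""
    return 100.0 * (d_check - d_tps) / d_tps

tps_doses = [2.00, 1.85, 0.90, 1.20, 2.10]   # Gy, illustrative reference points
mc_doses  = [2.03, 1.80, 0.98, 1.21, 2.05]   # Gy, illustrative MC results

diffs = [abs(percent_diff(m, t)) for m, t in zip(mc_doses, tps_doses)]
action_limit = 5.0  # percent, per the stated action limit
flagged = [d for d in diffs if d > action_limit]
print(f"mean |diff| = {sum(diffs)/len(diffs):.1f}%, "
      f"{100*len(flagged)/len(diffs):.0f}% of points exceed {action_limit}%")
```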
The average absolute difference was 3.0%, with only 22% of all points exceeding the 5% action limit. In contrast, the MUCheck average absolute difference was 8.4%, with 60% of points exceeding the 5% dose difference. Lung plans were particularly problematic for MUCheck, with an average absolute difference of approximately 16%. Our EGSnrc-based MC framework can be used for the MU verification of VMAT plans calculated for the Varian TrueBeam; furthermore, our phase space approach can be adapted to other treatment devices by using appropriate phase space files. The use of 5 × 10[7] histories consistently satisfied the 5% action limit across all plan types for the majority of points, performing significantly better than a commercial MU verification system, MUCheck. As faster processors and cloud computing facilities become even more widely available, this approach can be readily implemented in clinical settings.}, } @article {pmid37467974, year = {2023}, author = {Mhanna, S and Halloran, LJS and Zwahlen, F and Asaad, AH and Brunner, P}, title = {Using machine learning and remote sensing to track land use/land cover changes due to armed conflict.}, journal = {The Science of the total environment}, volume = {898}, number = {}, pages = {165600}, doi = {10.1016/j.scitotenv.2023.165600}, pmid = {37467974}, issn = {1879-1026}, mesh = {*Remote Sensing Technology ; *Conservation of Natural Resources/methods ; Agriculture/methods ; Environmental Monitoring/methods ; Climate ; }, abstract = {Armed conflicts have detrimental impacts on the environment, including land systems. The prevailing understanding of the relation between Land Use/Land Cover (LULC) and armed conflict fails to fully recognize the complexity of their dynamics - a shortcoming that could undermine food security and sustainable land/water resources management in conflict settings. The Syrian portion of the transboundary Orontes River Basin (ORB) has been a site of violent conflict since 2013. Correspondingly, the Lebanese and Turkish portions of the ORB have seen large influxes of refugees. A major challenge in any geoscientific investigation in this region, specifically the Syrian portion, is the unavailability of directly-measured "ground truth" data. To circumvent this problem, we develop a novel methodology that combines remote sensing products, machine learning techniques and quasi-experimental statistical analysis to better understand LULC changes in the ORB between 2004 and 2022. Through analysis of the resulting annual LULC maps, we can draw several quantitative conclusions. Cropland areas decreased by 21-24 % in Syria's conflict hotspot zones after 2013, whereas a 3.4-fold increase was detected in Lebanon. The development of refugee settlements was also tracked in Lebanon and on the Syrian/Turkish borders, revealing different LULC patterns that depend on settlement dynamics. The results highlight the importance of understanding the heterogeneous spatio-temporal LULC changes in conflict-affected and refugee-hosting countries.
The developed methodology is a flexible, cloud-based approach that can be applied to a wide variety of LULC investigations related to conflict, policy and climate.}, } @article {pmid37453975, year = {2024}, author = {Venkataswamy, R and Janamala, V and Cherukuri, RC}, title = {Realization of Humanoid Doctor and Real-Time Diagnostics of Disease Using Internet of Things, Edge Impulse Platform, and ChatGPT.}, journal = {Annals of biomedical engineering}, volume = {52}, number = {4}, pages = {738-740}, pmid = {37453975}, issn = {1573-9686}, mesh = {Humans ; *Artificial Intelligence ; *Internet of Things ; Health Personnel ; }, abstract = {Humanoid doctor is an AI-based robot that features remote bi-directional communication and is embedded with disruptive technologies. Accurate and real-time responses are the main characteristics of a humanoid doctor which diagnoses disease in a patient. The patient details are obtained by Internet of Things devices, edge devices, and text formats. The inputs from the patient are processed by the humanoid doctor, and it provides its opinion to the patient. The historical patient data are trained using a cloud artificial intelligence platform and the model is tested against the patient sample data acquired using medical IoT and edge devices. Disease is identified at three different stages and analyzed. The humanoid doctor is expected to identify the diseases well in comparison with human healthcare professionals. The humanoid doctor is under-trusted because of the lack of a multi-featured accurate model, accessibility, availability, and standardization. In this letter, patient input, artificial intelligence, and response zones are encapsulated and the humanoid doctor is realized.}, } @article {pmid37448004, year = {2023}, author = {Mangalampalli, S and Swain, SK and Chakrabarti, T and Chakrabarti, P and Karri, GR and Margala, M and Unhelkar, B and Krishnan, SB}, title = {Prioritized Task-Scheduling Algorithm in Cloud Computing Using Cat Swarm Optimization.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {13}, pages = {}, pmid = {37448004}, issn = {1424-8220}, mesh = {*Cloud Computing ; *Algorithms ; Workload ; }, abstract = {Effective scheduling algorithms are needed in the cloud paradigm to deliver services to customers seamlessly while minimizing the makespan, energy consumption and SLA violations. Scheduling resources ineffectively, without considering task suitability, affects the cloud provider's quality of service; inefficient provisioning of resources also wastes energy in running tasks and takes an enormous amount of time to process them, which affects the makespan. Minimizing SLA violations is an important aspect that needs to be addressed as it impacts the makespans, energy consumption, and also the quality of service in a cloud environment. Many existing studies have solved task-scheduling problems, and those algorithms gave near-optimal solutions from their perspective. In this manuscript, we developed a novel task-scheduling algorithm that considers the task priorities coming onto the cloud platform, calculates their task VM priorities, and feeds them to the scheduler. Then, the scheduler will choose appropriate tasks for the VMs based on the calculated priorities. To model this scheduling algorithm, we used the cat swarm optimization algorithm, which was inspired by the behavior of cats. It was implemented on the Cloudsim tool and OpenStack cloud platform.
Extensive experimentation was carried out using real-time workloads. When compared to the baseline PSO, ACO, and RATS-HM approaches, the results make it evident that our proposed approach outperforms all of the baseline algorithms with respect to the above-mentioned parameters.}, } @article {pmid37447987, year = {2023}, author = {Chen, CL and Lai, JL}, title = {An Experimental Detection of Distributed Denial of Service Attack in CDX 3 Platform Based on Snort.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {13}, pages = {}, pmid = {37447987}, issn = {1424-8220}, mesh = {*Algorithms ; *Internet ; Machine Learning ; }, abstract = {Distributed Denial of Service (DDoS) attacks pose a significant threat to internet and cloud security. Our study utilizes a Poisson distribution model to efficiently detect DDoS attacks with a computational complexity of O(n). Unlike Machine Learning (ML)-based algorithms, our method only needs to set up one or more Poisson models for legitimate traffic based on the granularity of the time periods during preprocessing, thus eliminating the need for training time. We validate this approach with four virtual machines on the CDX 3.0 platform, each simulating different aspects of DDoS attacks for offensive, monitoring, and defense evaluation purposes. The study further analyzes seven diverse DDoS attack methods. When compared with existing methods, our approach demonstrates superior performance, highlighting its potential effectiveness in real-world DDoS attack detection.}, } @article {pmid37447969, year = {2023}, author = {Hsieh, TM and Chen, KY}, title = {Knowledge Development Trajectory of the Internet of Vehicles Domain Based on Main Path Analysis.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {13}, pages = {}, pmid = {37447969}, issn = {1424-8220}, mesh = {*Internet ; Cloud Computing ; Automation ; *Blockchain ; Cities ; }, abstract = {The Internet of vehicles (IoV) is an Internet-of-things-based network in the area of transportation. It comprises sensors, network communication, automation control, and data processing and enables connectivity between vehicles and other objects. This study performed main path analysis (MPA) to investigate the trajectory of research regarding the IoV. Studies were extracted from the Web of Science database, and citation networks among these studies were generated. MPA revealed that research in this field has mainly covered media access control, vehicle-to-vehicle channels, device-to-device communications, layers, non-orthogonal multiple access, and sixth-generation communications. Cluster analysis and data mining revealed that the main research topics related to the IoV included wireless channels, communication protocols, vehicular ad hoc networks, security and privacy, resource allocation and optimization, autonomous cruise control, deep learning, and edge computing. By using data mining and statistical analysis, we identified emerging research topics related to the IoV, namely blockchains, deep learning, edge computing, cloud computing, vehicular dynamics, and fifth- and sixth-generation mobile communications. These topics are likely to help drive innovation and the further development of IoV technologies and contribute to smart transportation, smart cities, and other applications.
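The Poisson-model detection idea in the DDoS entry above (pmid 37447987) can be sketched in a few lines: fit a Poisson rate to legitimate per-interval traffic and flag intervals whose packet counts are improbably high. The rate, quantile, and counts below are assumptions for illustration, not values from the paper.

```python
# Hedged sketch: flag traffic intervals that are extreme under a Poisson model.
from scipy.stats import poisson

lam = 120.0                           # assumed mean packets/interval for normal traffic
threshold = poisson.ppf(0.999, lam)   # counts above the 99.9th percentile are suspicious

observed = [115, 131, 128, 540, 122]  # hypothetical per-interval packet counts
for t, count in enumerate(observed):
    if count > threshold:
        print(f"interval {t}: {count} packets -> possible DDoS")
```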
On the basis of the present results, this paper offers several predictions regarding the future of IoV research.}, } @article {pmid37447967, year = {2023}, author = {Adnan, M and Slavic, G and Martin Gomez, D and Marcenaro, L and Regazzoni, C}, title = {Systematic and Comprehensive Review of Clustering and Multi-Target Tracking Techniques for LiDAR Point Clouds in Autonomous Driving Applications.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {13}, pages = {}, pmid = {37447967}, issn = {1424-8220}, support = {PID2019-104793RB-C31//Spanish Government under Grants/ ; PDC2021-1215-17-C31//Spanish Government under Grants/ ; PID2021-124335OB-C21//Spanish Government under Grants/ ; TED2021-129485B-C44//Spanish Government under Grants/ ; P2018/EMT-4362//Comunidad de Madrid under Grant SEGVAUTO-4.0-CM/ ; }, mesh = {Reproducibility of Results ; *Autonomous Vehicles ; Cluster Analysis ; Databases, Factual ; *Evidence Gaps ; }, abstract = {Autonomous vehicles (AVs) rely on advanced sensory systems, such as Light Detection and Ranging (LiDAR), to function seamlessly in intricate and dynamic environments. LiDAR produces highly accurate 3D point clouds, which are vital for the detection, classification, and tracking of multiple targets. A systematic review and classification of various clustering and Multi-Target Tracking (MTT) techniques are necessary due to the inherent challenges posed by LiDAR data, such as density, noise, and varying sampling rates. As part of this study, the Preferred Reporting Items for Systematic Reviews and Meta-Analyses (PRISMA) methodology was employed to examine the challenges and advancements in MTT techniques and clustering for LiDAR point clouds within the context of autonomous driving. Searches were conducted in major databases such as IEEE Xplore, ScienceDirect, SpringerLink, ACM Digital Library, and Google Scholar, utilizing customized search strategies. We identified and critically reviewed 76 relevant studies based on rigorous screening and evaluation processes, assessing their methodological quality, data handling adequacy, and reporting compliance. As a result of this comprehensive review and classification, we were able to provide a detailed overview of current challenges, research gaps, and advancements in clustering and MTT techniques for LiDAR point clouds, thus contributing to the field of autonomous driving. Researchers and practitioners working in the field of autonomous driving will benefit from this study, which was conducted with systematic transparency and reproducibility.}, } @article {pmid37447966, year = {2023}, author = {Kaur, A and Kumar, S and Gupta, D and Hamid, Y and Hamdi, M and Ksibi, A and Elmannai, H and Saini, S}, title = {Algorithmic Approach to Virtual Machine Migration in Cloud Computing with Updated SESA Algorithm.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {13}, pages = {}, pmid = {37447966}, issn = {1424-8220}, support = {PNURSP2023R125//Princess Nourah bint Abdulrahman University Researchers Supporting Project/ ; }, abstract = {Cloud computing plays an important role in every IT sector. Many tech giants such as Google, Microsoft, and Facebook are deploying their data centres around the world to provide computation and storage services. Customers either submit their jobs directly or rely on brokers to submit them to the cloud centres.
The primary aim is to reduce overall power consumption, which was ignored in the early days of cloud development. This was due to the performance expectations from cloud servers, as they were supposed to provide all the services through their service layers: IaaS, PaaS, and SaaS. As time passed and researchers came up with new terminologies and algorithmic architectures for reducing power consumption and improving sustainability, other algorithmic families were also introduced, such as statistically oriented learning and bioinspired algorithms. In this paper, an in-depth review of multiple approaches for migration among virtual machines is conducted to identify various issues in existing approaches. The proposed work utilizes elastic scheduling inspired by the smart elastic scheduling algorithm (SESA) to develop a more energy-efficient VM allocation and migration algorithm. The proposed work uses cosine similarity and bandwidth utilization as additional utilities to improve the current performance in terms of QoS. The proposed work is evaluated for overall power consumption and service level agreement violation (SLA-V) and is compared with related state-of-the-art techniques. A proposed algorithm is also presented in order to solve problems found during the survey.}, } @article {pmid37447902, year = {2023}, author = {Botez, R and Pasca, AG and Sferle, AT and Ivanciu, IA and Dobrota, V}, title = {Efficient Network Slicing with SDN and Heuristic Algorithm for Low Latency Services in 5G/B5G Networks.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {13}, pages = {}, pmid = {37447902}, issn = {1424-8220}, abstract = {This paper presents a novel approach for network slicing in 5G backhaul networks, targeting services with low or very low latency requirements. We propose a modified A* algorithm that incorporates network quality of service parameters into a composite metric. The algorithm outperforms Dijkstra's algorithm in efficiency by using a precalculated heuristic function and a real-time monitoring strategy for congestion management. We integrate the algorithm into an SDN module called a path computation element, which computes the optimal path for the network slices. Experimental results show that the proposed algorithm significantly reduces processing time compared to Dijkstra's algorithm, particularly in complex topologies, with an order of magnitude improvement. The algorithm successfully adjusts paths in real-time to meet low latency requirements, preventing packet delay from exceeding the established threshold. The end-to-end measurements using the Speedtest client validate the algorithm's performance in differentiating traffic with and without delay requirements. These results demonstrate the efficacy of our approach in achieving ultra-reliable low-latency communication (URLLC) in 5G backhaul networks.}, } @article {pmid37447888, year = {2023}, author = {Fereira, R and Ranaweera, C and Lee, K and Schneider, JG}, title = {Energy Efficient Node Selection in Edge-Fog-Cloud Layered IoT Architecture.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {13}, pages = {}, pmid = {37447888}, issn = {1424-8220}, mesh = {*Conservation of Energy Resources ; *Internet of Things ; Autonomous Vehicles ; Communication ; Computer Systems ; }, abstract = {Internet of Things (IoT) architectures generally focus on providing consistent performance and reliable communications.
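To make the modified-A* idea in the network-slicing entry above (pmid 37447902) concrete, here is a minimal, hedged sketch of A* search over a graph whose edge weights stand in for a composite QoS metric. The topology, weights, and zero heuristic are invented; the paper's actual metric and precalculated heuristic are not reproduced.

```python
# Toy A* over a graph with composite QoS edge costs (all values invented).
import heapq

def astar(graph, heuristic, start, goal):
    """graph: node -> list of (neighbor, cost); heuristic must not overestimate."""
    frontier = [(heuristic(start), 0.0, start, [start])]
    best = {start: 0.0}
    while frontier:
        _, g, node, path = heapq.heappop(frontier)
        if node == goal:
            return g, path
        for nxt, w in graph.get(node, []):
            ng = g + w
            if ng < best.get(nxt, float("inf")):
                best[nxt] = ng
                heapq.heappush(frontier, (ng + heuristic(nxt), ng, nxt, path + [nxt]))
    return None

# Hypothetical backhaul topology; weights stand in for a delay/jitter/loss composite.
g = {"A": [("B", 1.0), ("C", 2.5)], "B": [("D", 2.0)], "C": [("D", 0.5)], "D": []}
print(astar(g, lambda n: 0.0, "A", "D"))  # a zero heuristic degenerates to Dijkstra
```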
The convergence of IoT, edge, fog, and cloud aims to improve the quality of service of applications but does not typically emphasize energy efficiency. Considering energy in IoT architectures would reduce the energy impact from billions of IoT devices. The research presented in this paper proposes an optimization framework that considers energy consumption of nodes when selecting a node for processing an IoT request in an edge-fog-cloud layered architecture. The IoT use cases considered in this paper include smart grid, autonomous vehicles, and eHealth. The proposed framework is evaluated using CPLEX simulations. The results provide insights into mechanisms that can be used to select nodes energy-efficiently whilst meeting the application requirements and other network constraints in multi-layered IoT architectures.}, } @article {pmid37447786, year = {2023}, author = {Li, H and Liu, X and Zhao, W}, title = {Research on Lightweight Microservice Composition Technology in Cloud-Edge Device Scenarios.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {13}, pages = {}, pmid = {37447786}, issn = {1424-8220}, support = {2022YFB330570//National Key R&D Program of China/ ; 21511104302//Shanghai Science and Technology Innovation Action/ ; }, mesh = {*Software ; *Technology ; Workflow ; }, abstract = {In recent years, cloud-native technology has become popular among Internet companies. Microservice architecture solves the complexity problem for multiple service methods by decomposing a single application so that each service can be independently developed, independently deployed, and independently expanded. At the same time, industrial Internet construction in China is still in its infancy, and small and medium-sized enterprises still face many problems in the process of digital transformation, such as difficult resource integration, complex control equipment workflows, slow development and deployment processes, and a shortage of operation and maintenance personnel. The existing traditional workflow architecture is mainly aimed at the cloud scenario; it consumes a lot of resources and cannot be used in resource-limited scenarios at the edge. Moreover, traditional workflows are not efficient at transferring data and often need to rely on various storage mechanisms. In this article, a lightweight and efficient workflow architecture is proposed to remedy the defects of these traditional workflows in combined cloud-edge scenarios. By orchestrating a lightweight workflow engine with a Kubernetes Operator, the architecture can significantly reduce workflow execution time and unify data flow between cloud microservices and edge devices.}, } @article {pmid37447769, year = {2023}, author = {Shruti, and Rani, S and Sah, DK and Gianini, G}, title = {Attribute-Based Encryption Schemes for Next Generation Wireless IoT Networks: A Comprehensive Survey.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {13}, pages = {}, pmid = {37447769}, issn = {1424-8220}, support = {MUSA CUP G43C22001370007, Code ECS000000//European Union/ ; SERICS PE0000001//European Union/ ; }, mesh = {*Computer Security ; *Internet of Things ; Privacy ; Cloud Computing ; Delivery of Health Care ; }, abstract = {Most data nowadays are stored in the cloud; therefore, cloud computing and its extension, fog computing, are the most in-demand services at the present time.
Cloud and fog computing platforms are largely used by Internet of Things (IoT) applications where various mobile devices, end users, PCs, and smart objects are connected to each other via the internet. IoT applications are common in several application areas, such as healthcare, smart cities, industries, logistics, agriculture, and many more. Due to this, there is an increasing need for new security and privacy techniques, with attribute-based encryption (ABE) being the most effective among them. ABE provides fine-grained access control, enables secure storage of data on unreliable storage, and is flexible enough to be used in different systems. In this paper, we survey ABE schemes, their features, methodologies, benefits/drawbacks, attacks on ABE, and how ABE can be used with IoT and its applications. This survey reviews ABE models suitable for IoT platforms, taking into account the desired features and characteristics. We also discuss various performance indicators used for ABE and how they affect efficiency. Furthermore, some selected schemes are analyzed through simulation to compare their efficiency in terms of different performance indicators. As a result, we find that some schemes perform well on one or two performance indicators simultaneously, whereas none shines in all of them at once. The work will help researchers identify the characteristics of different ABE schemes quickly and recognize whether they are suitable for specific IoT applications. Future work that may be helpful for ABE is also discussed.}, } @article {pmid37447635, year = {2023}, author = {Irugalbandara, C and Naseem, AS and Perera, S and Kiruthikan, S and Logeeshan, V}, title = {A Secure and Smart Home Automation System with Speech Recognition and Power Measurement Capabilities.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {13}, pages = {}, pmid = {37447635}, issn = {1424-8220}, mesh = {Humans ; Aged ; *Speech Perception ; Speech ; *Voice ; *Persons with Disabilities ; Automation ; }, abstract = {The advancement in the internet of things (IoT) technologies has made it possible to control and monitor electronic devices at home with just the touch of a button. This has made people lead much more comfortable lifestyles. Elderly people and those with disabilities have especially benefited from voice-assisted home automation systems that allow them to control their devices with simple voice commands. However, the widespread use of cloud-based services in these systems, such as those offered by Google and Amazon, has made them vulnerable to cyber-attacks. To ensure the proper functioning of these systems, a stable internet connection and a secure environment free from cyber-attacks are required. However, the quality of the internet is often low in developing countries, which makes it difficult to access the services these systems offer. Additionally, the lack of localization in voice assistants prevents people from using voice-assisted home automation systems in these countries. To address these challenges, this research proposes an offline home automation system. Since the internet and cloud services are not required for an offline system, it can perform its essential functions while ensuring protection against cyber-attacks and providing quick responses.
It offers additional features, such as power usage tracking and the optimization of linked devices.}, } @article {pmid37447111, year = {2023}, author = {Yan, Y and Xin, Z and Bai, X and Zhan, H and Xi, J and Xie, J and Cheng, Y}, title = {Analysis of Growing Season Normalized Difference Vegetation Index Variation and Its Influencing Factors on the Mongolian Plateau Based on Google Earth Engine.}, journal = {Plants (Basel, Switzerland)}, volume = {12}, number = {13}, pages = {}, pmid = {37447111}, issn = {2223-7747}, support = {31870706//National Natural Science Foundation of China/ ; }, abstract = {Frequent dust storms on the Mongolian Plateau have adversely affected the ecological environmental quality of East Asia. Studying the dynamic changes in vegetation coverage is one of the important means of evaluating ecological environmental quality in the region. In this study, we used Landsat remote sensing images from 2000 to 2019 on the Mongolian Plateau to extract yearly Normalized Difference Vegetation Index (NDVI) data during the growing season. We used partial correlation analysis and the Hurst index to analyze the spatiotemporal characteristics of the NDVI before and after the establishment of nature reserves and their influencing factors on the GEE cloud platform. The results showed that (1) the proportion of the region with an upwards trend of NDVI increased from 52.21% during 2000-2009 to 67.93% during 2010-2019, indicating a clear improvement in vegetation due to increased precipitation; (2) the increase in precipitation and positive human activities drove the increase in the NDVI in the study region from 2000 to 2019; and (3) the overall trend of the NDVI in the future is expected to be stable with a slight decrease, and restoration potential is greater for water bodies and grasslands. Therefore, it is imperative to strengthen positive human activities to safeguard vegetation. These findings furnish scientific evidence for environmental management and the development of ecological engineering initiatives on the Mongolian Plateau.}, } @article {pmid37443425, year = {2024}, author = {Hotchkiss, JT and Ridderman, E and Bufkin, W}, title = {Development of a model and method for hospice quality assessment from natural language processing (NLP) analysis of online caregiver reviews.}, journal = {Palliative & supportive care}, volume = {22}, number = {1}, pages = {19-30}, doi = {10.1017/S1478951523001001}, pmid = {37443425}, issn = {1478-9523}, mesh = {Humans ; United States ; *Hospice Care/psychology ; *Hospices/methods ; Caregivers/psychology ; Retrospective Studies ; Natural Language Processing ; }, abstract = {OBJECTIVES: With a fraction of hospices having their Consumer Assessment of Healthcare Providers and Systems (CAHPS®) scores on Hospice Compare, a significant reservoir of hospice quality data remains in online caregiver reviews. The purpose of this study was to develop a method and model of hospice quality assessment from caregiver reviews using Watson's carative model.

METHODS: Retrospective mixed methods of pilot qualitative thematic analysis and sentiment analysis using NLP of Google and Yelp caregiver reviews between 2013 and 2023. We employed stratified sampling, weighted according to hospice size, to emulate the daily census of enrollees across the United States. Sentiment analysis was performed (n = 3393) using Google NLP.
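For readers unfamiliar with the tooling named in the METHODS above, this is a minimal, hedged sketch of document-level sentiment scoring with the Google Cloud Natural Language client library; authentication setup is omitted, and the review text is an invented example rather than study data.

```python
# Sketch of sentiment scoring with google-cloud-language (credentials assumed configured).
from google.cloud import language_v1

client = language_v1.LanguageServiceClient()
review = "The hospice staff were caring and kept our family informed."  # hypothetical
document = language_v1.Document(
    content=review, type_=language_v1.Document.Type.PLAIN_TEXT)
response = client.analyze_sentiment(request={"document": document})
# score runs from -1 (negative) to +1 (positive), the same signed scale as the
# theme-level sentiments reported in the RESULTS below
print(response.document_sentiment.score, response.document_sentiment.magnitude)
```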

RESULTS: Two themes with the highest prevalence had moderately positive sentiments (S): Caring staff (+.47) and Care quality, comfort and cleanliness (+.41). Other positive sentiment scores with high prevalence were Gratitude and thanks (+.81), "Treating the patient with respect" (+.54), and "Emotional, spiritual, bereavement support" (+.60). Lowest sentiment scores were "Insurance, administrative or billing" (-.37), "Lack of staffing" (-.32), and "Communication with the family" (-.01).

SIGNIFICANCE OF RESULTS: In the developed quality model, caregivers recommended hospices with caring staff, providing quality care, responsive to requests, and offering family support, including bereavement care. All ten Watson's carative factors and all eight CAHPS measures were presented in the discovered review themes of the quality model. Close-ended CAHPS scores and open-ended online reviews have substantial conceptual overlap and complementary insights. Future hospice quality research should explore caregiver expectations and compare review themes by profit status.}, } @article {pmid37441434, year = {2023}, author = {Gemborn Nilsson, M and Tufvesson, P and Heskebeck, F and Johansson, M}, title = {An open-source human-in-the-loop BCI research framework: method and design.}, journal = {Frontiers in human neuroscience}, volume = {17}, number = {}, pages = {1129362}, pmid = {37441434}, issn = {1662-5161}, abstract = {Brain-computer interfaces (BCIs) translate brain activity into digital commands for interaction with the physical world. The technology has great potential in several applied areas, ranging from medical applications to entertainment industry, and creates new conditions for basic research in cognitive neuroscience. The BCIs of today, however, offer only crude online classification of the user's current state of mind, and more sophisticated decoding of mental states depends on time-consuming offline data analysis. The present paper addresses this limitation directly by leveraging a set of improvements to the analytical pipeline to pave the way for the next generation of online BCIs. Specifically, we introduce an open-source research framework that features a modular and customizable hardware-independent design. This framework facilitates human-in-the-loop (HIL) model training and retraining, real-time stimulus control, and enables transfer learning and cloud computing for the online classification of electroencephalography (EEG) data. Stimuli for the subject and diagnostics for the researcher are shown on separate displays using web browser technologies. Messages are sent using the Lab Streaming Layer standard and websockets. Real-time signal processing and classification, as well as training of machine learning models, is facilitated by the open-source Python package Timeflux. The framework runs on Linux, MacOS, and Windows. While online analysis is the main target of the BCI-HIL framework, offline analysis of the EEG data can be performed with Python, MATLAB, and Julia through packages like MNE, EEGLAB, or FieldTrip. The paper describes and discusses desirable properties of a human-in-the-loop BCI research platform. The BCI-HIL framework is released under MIT license with examples at: bci.lu.se/bci-hil (or at: github.com/bci-hil/bci-hil).}, } @article {pmid37437364, year = {2023}, author = {Baek, H and Yun, WJ and Park, S and Kim, J}, title = {Stereoscopic scalable quantum convolutional neural networks.}, journal = {Neural networks : the official journal of the International Neural Network Society}, volume = {165}, number = {}, pages = {860-867}, doi = {10.1016/j.neunet.2023.06.027}, pmid = {37437364}, issn = {1879-2782}, mesh = {*Computing Methodologies ; *Quantum Theory ; Neural Networks, Computer ; Algorithms ; Cloud Computing ; }, abstract = {As the noisy intermediate-scale quantum (NISQ) era has begun, a quantum neural network (QNN) is definitely a promising solution to many problems that classical neural networks cannot solve. 
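As a brief aside on the BCI-HIL framework entry above (pmid 37441434), which streams messages using the Lab Streaming Layer standard, the following hedged sketch publishes synthetic EEG-like samples over LSL with the pylsl package; the stream parameters are placeholders, not the framework's configuration.

```python
# Hedged sketch: publish synthetic 4-channel EEG-like samples over LSL.
import random
import time
from pylsl import StreamInfo, StreamOutlet

info = StreamInfo(name="ToyEEG", type="EEG", channel_count=4,
                  nominal_srate=100, channel_format="float32",
                  source_id="toy-eeg-001")  # all values are illustrative
outlet = StreamOutlet(info)

for _ in range(1000):
    sample = [random.gauss(0.0, 1.0) for _ in range(4)]  # fake 4-channel sample
    outlet.push_sample(sample)
    time.sleep(0.01)  # roughly 100 Hz
```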
In addition, a quantum convolutional neural network (QCNN) is now receiving a lot of attention because it can process high-dimensional inputs compared to a QNN. However, due to the nature of quantum computing, it is difficult to scale up the QCNN to extract a sufficient number of features due to barren plateaus. This is especially challenging in classification operations with high-dimensional data input. Motivated by this, a novel stereoscopic 3D scalable QCNN (sQCNN-3D) is proposed for point cloud data processing in classification applications. Furthermore, reverse fidelity training (RF-Train) is additionally considered on top of sQCNN-3D for diversifying features with a limited number of qubits using the fidelity of quantum computing. Our data-intensive performance evaluation verifies that the proposed algorithm achieves desired performance.}, } @article {pmid37434236, year = {2023}, author = {Glauser, R and Holm, J and Bender, M and Bürkle, T}, title = {How can social robot use cases in healthcare be pushed - with an interoperable programming interface.}, journal = {BMC medical informatics and decision making}, volume = {23}, number = {1}, pages = {118}, pmid = {37434236}, issn = {1472-6947}, mesh = {Humans ; *Robotics ; Social Interaction ; Health Facilities ; Speech ; Delivery of Health Care ; }, abstract = {INTRODUCTION: Research into current robot middleware has revealed that most of them are either too complicated or outdated. These facts have motivated the development of a new middleware to meet the requirements of usability by non-experts. The proposed middleware is based on Android and is intended to be placed over existing robot SDKs and middleware. It runs on the Android tablet of the Cruzr robot. Various tools have been developed, such as a web component to control the robot via a web interface, which facilitates its use.

METHODS: The middleware was developed using Android Java and runs on the Cruzr tablet as an app. It features a WebSocket server that interfaces with the robot and allows control via Python or other WebSocket-compatible languages. The speech interface utilizes Google Cloud Voice text-to-speech and speech-to-text services. The interface was implemented in Python, allowing for easy integration with existing robotics development workflows, and a web interface was developed for direct control of the robot via the web.
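A hedged sketch of what a Python client for such a WebSocket-based middleware might look like is shown below; the endpoint URI and the JSON command schema are assumptions for illustration, not the middleware's documented protocol.

```python
# Hypothetical client for a WebSocket-controlled robot middleware.
import asyncio
import json
import websockets  # pip install websockets

async def say(text: str) -> None:
    uri = "ws://cruzr.local:9090"  # assumed middleware endpoint, not documented
    async with websockets.connect(uri) as ws:
        await ws.send(json.dumps({"command": "tts", "text": text}))  # assumed schema
        print(await ws.recv())  # e.g., an acknowledgement from the robot

asyncio.run(say("Hello, I am your assistant today."))
```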

RESULTS: The new robot middleware was created and deployed on a Cruzr robot, relying on the WebSocket API and featuring a Python implementation. It supports various robot functions, such as text-to-speech, speech-to-text, navigation, displaying content and scanning bar codes. The system's architecture allows for porting the interface to other robots and platforms, showcasing its adaptability. It has been demonstrated that the middleware can be run on a Pepper robot, although not all functions have been implemented yet. The middleware was utilized to implement healthcare use cases and received good feedback.

CONCLUSION: Cloud and local speech services were discussed with regard to the middleware's need to run on other robots without any code changes. An outlook is given on how the programming interface can be further simplified by using natural-text-to-code generators. For other researchers using the aforementioned platforms (Cruzr, Pepper), the new middleware can be utilized for testing human-robot interaction. It can be used in a teaching setting, as well as be adapted to other robots using the same interface and philosophy regarding simple methods.}, } @article {pmid37433723, year = {2023}, author = {Vinjerui, KH and Sarheim Anthun, K and Asheim, A and Carlsen, F and Mjølstad, BP and Nilsen, SM and Pape, K and Bjorngaard, JH}, title = {General practitioners ending their practice and impact on patients' health, healthcare use and mortality: a protocol for national registry cohort studies in Norway, 2008 to 2021.}, journal = {BMJ open}, volume = {13}, number = {7}, pages = {e072220}, pmid = {37433723}, issn = {2044-6055}, mesh = {Humans ; *General Practitioners ; *General Practice ; Norway ; Cohort Studies ; Registries ; }, abstract = {INTRODUCTION: Continuous general practitioner (GP) and patient relations are associated with positive health outcomes. Termination of GP practice is unavoidable, while the consequences of final breaks in these relations are less explored. We will study how an ended GP relation affects patients' healthcare utilisation and mortality compared with patients with a continuous GP relation.

METHODS AND ANALYSIS: We link national registry data on individual GP affiliation, sociodemographic characteristics, healthcare use and mortality. From 2008 to 2021, we identify patients whose GP stopped practicing and will compare their acute and elective, primary and specialist healthcare use and mortality with those of patients whose GP did not stop practicing. We match GP-patient pairs on age and sex (both), immigrant status and education (patients), and number of patients and practice period (GPs). We analyse the outcomes before and after an ended GP-patient relation, using Poisson regression with high-dimensional fixed effects.
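As a toy illustration of the Poisson regression step described above, the sketch below fits a Poisson GLM with statsmodels on synthetic data; the protocol's high-dimensional fixed effects are reduced here to a single exposure dummy, so this is a sketch of the estimator family, not the study's model.

```python
# Synthetic Poisson regression: rate of healthcare contacts by exposure status.
import numpy as np
import statsmodels.api as sm

rng = np.random.default_rng(1)
exposed = rng.integers(0, 2, size=1000)           # GP stopped practicing (1) or not (0)
X = sm.add_constant(exposed.astype(float))
y = rng.poisson(lam=np.exp(0.1 + 0.3 * exposed))  # simulated contact counts

model = sm.GLM(y, X, family=sm.families.Poisson()).fit()
print(model.params)  # the exposure coefficient approximates the log rate ratio (~0.3)
```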

ETHICS AND DISSEMINATION: This study protocol is part of the approved project Improved Decisions with Causal Inference in Health Services Research, 2016/2159/REK Midt (the Regional Committees for Medical and Health Research Ethics) and does not require consent. HUNT Cloud provides secure data storage and computing. We will report using the STROBE guideline for observational case-control studies, publish in peer-reviewed journals accessible in NTNU Open, and present at scientific conferences. To reach a broader audience, we will summarise articles on the project's web page and in regular and social media, and disseminate to relevant stakeholders.}, } @article {pmid37430789, year = {2023}, author = {Peniak, P and Bubeníková, E and Kanáliková, A}, title = {Validation of High-Availability Model for Edge Devices and IIoT.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {10}, pages = {}, pmid = {37430789}, issn = {1424-8220}, abstract = {Competitiveness in industry requires smooth, efficient, and high-quality operation. For some industrial applications or process control and monitoring applications, it is necessary to achieve high availability and reliability because, for example, the failure of availability in industrial production can have serious consequences for the operation and profitability of the company, as well as for the safety of employees and the surrounding environment. At present, many new technologies that use data obtained from various sensors for evaluation or decision-making require the minimization of data processing latency to meet the needs of real-time applications. Cloud/Fog and Edge computing technologies have been proposed to overcome latency issues and to increase computing power. However, industrial applications also require the high availability and reliability of devices and systems. The potential malfunction of Edge devices can cause a failure of applications, and the unavailability of Edge computing results can have a significant impact on manufacturing processes. Therefore, our article deals with the creation and validation of an enhanced Edge device model, which, in contrast to current solutions, is aimed not only at the integration of various sensors within manufacturing solutions, but also brings the required redundancy to enable the high availability of Edge devices. In the model, we use Edge computing, which performs the recording of sensed data from various types of sensors, synchronizes them, and makes them available for decision making by applications in the Cloud. We focus on creating a suitable Edge device model that works with redundancy by using either mirroring or duplexing via a secondary Edge device. This enables high Edge device availability and rapid system recovery in the event of a failure of the primary Edge device. The created model of high availability is based on the mirroring and duplexing of the Edge devices, which support two protocols: OPC UA and MQTT. The models were implemented in the Node-Red software, tested, and subsequently validated and compared to confirm the required recovery time and 100% redundancy of the Edge device. In contrast to the currently available Edge solutions, our proposed extended model based on Edge mirroring is able to address most of the critical cases where fast recovery is required, and no adjustments are needed for critical applications.
The maturity level of Edge high availability can be further extended by applying Edge duplexing for process control.}, } @article {pmid37430776, year = {2023}, author = {Tahir, A and Bai, S and Shen, M}, title = {A Wearable Multi-Modal Digital Upper Limb Assessment System for Automatic Musculoskeletal Risk Evaluation.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {10}, pages = {}, pmid = {37430776}, issn = {1424-8220}, mesh = {Humans ; Upper Extremity ; Algorithms ; Computers, Handheld ; Databases, Factual ; *Musculoskeletal Diseases/diagnosis ; *Wearable Electronic Devices ; }, abstract = {Continuous ergonomic risk assessment of the human body is critical to avoid various musculoskeletal disorders (MSDs) for people involved in physical jobs. This paper presents a digital upper limb assessment (DULA) system that automatically performs rapid upper limb assessment (RULA) in real-time for the timely intervention and prevention of MSDs. While existing approaches require human resources for computing the RULA score, which is highly subjective and untimely, the proposed DULA achieves automatic and objective assessment of musculoskeletal risks using a wireless sensor band embedded with multi-modal sensors. The system continuously tracks and records upper limb movements and muscle activation levels and automatically generates musculoskeletal risk levels. Moreover, it stores the data in a cloud database for in-depth analysis by a healthcare expert. Limb movements and muscle fatigue levels can also be viewed on any tablet/computer in real time. In the paper, algorithms for robust limb motion detection are developed, and an explanation of the system is provided along with the presentation of preliminary results, which validate the effectiveness of the new technology.}, } @article {pmid37430596, year = {2023}, author = {Xu, Q and Zhang, G and Wang, J}, title = {Research on Cloud-Edge-End Collaborative Computing Offloading Strategy in the Internet of Vehicles Based on the M-TSA Algorithm.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {10}, pages = {}, pmid = {37430596}, issn = {1424-8220}, support = {202101AS070016//the "Yunnan Xingdian Talents Support Plan" project of Yunnan and Key Projects of Yunnan Basic Research Plan/ ; 202101AS070016//the Yunnan Province Basic Research Program Key Funding Project/ ; }, abstract = {In the Internet of Vehicles scenario, the in-vehicle terminal cannot meet the requirements of computing tasks in terms of delay and energy consumption; the introduction of cloud computing and MEC is an effective way to solve the above problem. The in-vehicle terminal has strict task processing delay requirements; uploading computing tasks to the cloud incurs high delay, and the MEC server has limited computing resources, which increases task processing delay when there are more tasks. To solve the above problems, a vehicle computing network based on cloud-edge-end collaborative computing is proposed, in which cloud servers, edge servers, service vehicles, and task vehicles themselves can provide computing services. A model of the cloud-edge-end collaborative computing system for the Internet of Vehicles is constructed, and a computational offloading strategy problem is given. Then, a computational offloading strategy based on the M-TSA algorithm and combined with task prioritization and computational offloading node prediction is proposed.
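A toy version of the offloading decision such a cloud-edge-end strategy must make is sketched below: pick the node minimizing a weighted delay-plus-energy cost. The candidate metrics and the weighting are invented for illustration and are not the M-TSA model from the entry above.

```python
# Hypothetical node selection by weighted delay/energy cost (values invented).
candidates = {
    "local": {"delay_ms": 120.0, "energy_j": 4.0},
    "edge":  {"delay_ms": 45.0,  "energy_j": 2.5},
    "cloud": {"delay_ms": 80.0,  "energy_j": 1.5},
}
ALPHA = 0.7  # assumed weight on delay versus energy

def cost(node: str) -> float:
    m = candidates[node]
    return ALPHA * m["delay_ms"] + (1.0 - ALPHA) * m["energy_j"] * 10.0  # scale joules

target = min(candidates, key=cost)
print("offload task to:", target)
```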
Finally, comparative experiments are conducted under task instances simulating real road vehicle conditions to demonstrate the superiority of our network, where our offloading strategy significantly improves the utility of task offloading and reduces offloading delay and energy consumption.}, } @article {pmid37430552, year = {2023}, author = {Mostafa, N and Kotb, Y and Al-Arnaout, Z and Alabed, S and Shdefat, AY}, title = {Replicating File Segments between Multi-Cloud Nodes in a Smart City: A Machine Learning Approach.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {10}, pages = {}, pmid = {37430552}, issn = {1424-8220}, abstract = {The design and management of smart cities and the IoT is a multidimensional problem. One of those dimensions is cloud and edge computing management. Due to the complexity of the problem, resource sharing is one of the vital and major components that, when enhanced, improves the performance of the whole system. Research in data access and storage in multi-clouds and edge servers can broadly be classified into data centers and computational centers. The main aim of data centers is to provide services for accessing, sharing and modifying large databases. On the other hand, the aim of computational centers is to provide services for sharing resources. Present and future distributed applications need to deal with very large multi-petabyte datasets and increasing numbers of associated users and resources. The emergence of IoT-based, multi-cloud systems as a potential solution for large computational and data management problems has initiated significant research activity in the area. Due to the considerable increase in data production and data sharing within scientific communities, the need for improvements in data access and data availability cannot be overlooked. It can be argued that the current approaches to large dataset management do not solve all problems associated with big data and large datasets. The heterogeneity and veracity of big data require careful management. One of the issues for managing big data in a multi-cloud system is the scalability and expandability of the system under consideration. Data replication ensures server load balancing, data availability and improved data access time. The proposed model minimises the cost of data services by minimising a cost function that takes storage cost, host access cost and communication cost into consideration. The relative weights between the different components are learned from history and differ from one cloud to another. The model ensures that data are replicated in a way that increases availability while at the same time decreasing the overall cost of data storage and access time. Using the proposed model avoids the overheads of the traditional full replication techniques. The proposed model is mathematically proven to be sound and valid.}, } @article {pmid37430518, year = {2023}, author = {Duong, PN and Lee, H}, title = {Pipelined Key Switching Accelerator Architecture for CKKS-Based Fully Homomorphic Encryption.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {10}, pages = {}, pmid = {37430518}, issn = {1424-8220}, abstract = {The increasing ubiquity of big data and cloud-based computing has led to increased concerns regarding the privacy and security of user data. In response, fully homomorphic encryption (FHE) was developed to address this issue by enabling arbitrary computation on encrypted data without decryption.
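The weighted replica-placement objective described in the multi-cloud replication entry above (pmid 37430552) can be sketched as a brute-force minimization over candidate replica sets; the per-cloud costs and the learned weights below are invented placeholders, not the paper's model.

```python
# Toy replica placement: minimize weighted storage + access + communication cost.
from itertools import combinations

clouds = {"c1": {"storage": 2.0, "access": 1.0, "comm": 0.5},
          "c2": {"storage": 1.5, "access": 1.8, "comm": 0.7},
          "c3": {"storage": 2.5, "access": 0.6, "comm": 0.4}}
weights = {"storage": 0.5, "access": 0.3, "comm": 0.2}  # stand-in for learned weights

def cost(replica_set):
    return sum(weights[k] * clouds[c][k] for c in replica_set for k in weights)

candidates = [s for r in (1, 2) for s in combinations(clouds, r)]
best = min(candidates, key=cost)
print("replicate on:", best, "cost:", round(cost(best), 3))
```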
However, the high computational costs of homomorphic evaluations restrict the practical application of FHE schemes. To tackle these computational and memory challenges, a variety of optimization approaches and acceleration efforts are actively being pursued. This paper introduces the KeySwitch module, a highly efficient and extensively pipelined hardware architecture designed to accelerate the costly key switching operation in homomorphic computations. Built on top of an area-efficient number-theoretic transform design, the KeySwitch module exploited the inherent parallelism of the key switching operation and incorporated three main optimizations: fine-grained pipelining, on-chip resource usage, and high-throughput implementation. An evaluation on the Xilinx U250 FPGA platform demonstrated a 1.6× improvement in data throughput compared to previous work, with more efficient hardware resource utilization. This work contributes to the development of advanced hardware accelerators for privacy-preserving computations and promotes the adoption of FHE in practical applications with enhanced efficiency.}, } @article {pmid37424120, year = {2023}, author = {Balla, Y and Tirunagari, S and Windridge, D}, title = {Pediatrics in Artificial Intelligence Era: A Systematic Review on Challenges, Opportunities, and Explainability.}, journal = {Indian pediatrics}, volume = {60}, number = {7}, pages = {561-569}, pmid = {37424120}, issn = {0974-7559}, mesh = {*Artificial Intelligence ; *Pediatrics ; *Clinical Decision-Making ; Humans ; Child, Preschool ; Child ; Deep Learning ; }, abstract = {BACKGROUND: The emergence of artificial intelligence (AI) tools such as ChatGPT and Bard is disrupting a broad swathe of fields, including medicine. In pediatric medicine, AI is also increasingly being used across multiple subspecialties. However, the practical application of AI still faces a number of key challenges. Consequently, there is a requirement for a concise overview of the roles of AI across the multiple domains of pediatric medicine, which the current study seeks to address.

AIM: To systematically assess the challenges, opportunities, and explainability of AI in pediatric medicine.

METHODOLOGY: A systematic search was carried out on peer-reviewed databases, PubMed Central, Europe PubMed Central, and grey literature using search terms related to machine learning (ML) and AI for the years 2016 to 2022 in the English language. A total of 210 articles were retrieved that were screened with PRISMA for abstract, year, language, context, and proximal relevance to research aims. A thematic analysis was carried out to extract findings from the included studies.

RESULTS: Twenty articles were selected for data abstraction and analysis, with three consistent themes emerging from these articles. In particular, eleven articles address the current state-of-the-art application of AI in diagnosing and predicting health conditions such as behavioral and mental health, cancer, syndromic and metabolic diseases. Five articles highlight the specific challenges of AI deployment in pediatric medicine: data security, handling, authentication, and validation. Four articles set out future opportunities for AI to be adapted: the incorporation of Big Data, cloud computing, precision medicine, and clinical decision support systems. Collectively, these studies critically evaluate the potential of AI in overcoming current barriers to adoption.

CONCLUSION: AI is proving disruptive within pediatric medicine and is presently associated with challenges, opportunities, and the need for explainability. AI should be viewed as a tool to enhance and support clinical decision-making rather than a substitute for human judgement and expertise. Future research should consequently focus on obtaining comprehensive data to ensure the generalizability of research findings.}, } @article {pmid37420777, year = {2023}, author = {Mari, D and Camuffo, E and Milani, S}, title = {CACTUS: Content-Aware Compression and Transmission Using Semantics for Automotive LiDAR Data.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {12}, pages = {}, pmid = {37420777}, issn = {1424-8220}, support = {PE0000001//Italian National Recovery and Resilience Plan (NRRP) of NextGenerationEU, partnership on "Telecommunications of the Future" (PE0000001 - program "RESTART")./ ; SID 2018 project SartreMR//University of Padua/ ; Project SYCURI//University of Padova/ ; }, mesh = {*Semantics ; Awareness ; *Data Compression ; Physical Phenomena ; }, abstract = {Many recent cloud or edge computing strategies for automotive applications require transmitting huge amounts of Light Detection and Ranging (LiDAR) data from terminals to centralized processing units. As a matter of fact, the development of effective Point Cloud (PC) compression strategies that preserve semantic information, which is critical for scene understanding, proves to be crucial. Segmentation and compression have always been treated as two independent tasks; however, since not all the semantic classes are equally important for the end task, this information can be used to guide data transmission. In this paper, we propose Content-Aware Compression and Transmission Using Semantics (CACTUS), which is a coding framework that exploits semantic information to optimize the data transmission, partitioning the original point set into separate data streams. Experimental results show that differently from traditional strategies, the independent coding of semantically consistent point sets preserves class information. Additionally, whenever semantic information needs to be transmitted to the receiver, using the CACTUS strategy leads to gains in terms of compression efficiency, and more in general, it improves the speed and flexibility of the baseline codec used to compress the data.}, } @article {pmid37420742, year = {2023}, author = {Prauzek, M and Kucova, T and Konecny, J and Adamikova, M and Gaiova, K and Mikus, M and Pospisil, P and Andriukaitis, D and Zilys, M and Martinkauppi, B and Koziorek, J}, title = {IoT Sensor Challenges for Geothermal Energy Installations Monitoring: A Survey.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {12}, pages = {}, pmid = {37420742}, issn = {1424-8220}, support = {SP2023/009//Student Grant System, VSB-TU Ostrava/ ; No. 856670//European Union's Horizon 2020 research and innovation programme/ ; }, mesh = {*Geothermal Energy ; Cloud Computing ; Information Technology ; *Internet of Things ; Technology ; }, abstract = {Geothermal energy installations are becoming increasingly common in new city developments and renovations. With a broad range of technological applications and improvements in this field, the demand for suitable monitoring technologies and control processes for geothermal energy installations is also growing. 
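The core CACTUS idea above (pmid 37420777), partitioning a labeled point cloud into per-class streams before coding, can be sketched in a few lines of Python; the points, labels, and raw-byte "codec" below are stand-ins, not the paper's compression pipeline.

```python
# Hedged sketch: split a semantic-labeled point cloud into per-class streams.
import numpy as np

rng = np.random.default_rng(2)
points = rng.random((1000, 3)).astype(np.float32)  # synthetic x, y, z coordinates
labels = rng.integers(0, 3, size=1000)             # hypothetical road/vehicle/other

streams = {int(c): points[labels == c] for c in np.unique(labels)}
for c, pts in streams.items():
    payload = pts.tobytes()  # stand-in for a per-class codec and transmission step
    print(f"class {c}: {len(pts)} points, {len(payload)} raw bytes")
```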
This article identifies opportunities for the future development and deployment of IoT sensors applied to geothermal energy installations. The first part of the survey describes the technologies and applications of various sensor types. Sensors that monitor temperature, flow rate and other mechanical parameters are presented with a technological background and their potential applications. The second part of the article surveys Internet-of-Things (IoT), communication technology and cloud solutions applicable to geothermal energy monitoring, with a focus on IoT node designs, data transmission technologies and cloud services. Energy harvesting technologies and edge computing methods are also reviewed. The survey concludes with a discussion of research challenges and an outline of new areas of application for monitoring geothermal installations and innovative technologies for producing IoT sensor solutions.}, } @article {pmid37409083, year = {2023}, author = {Sun, X and Li, S}, title = {Multi-sensor network tracking research utilizing searchable encryption algorithm in the cloud computing environment.}, journal = {PeerJ. Computer science}, volume = {9}, number = {}, pages = {e1433}, pmid = {37409083}, issn = {2376-5992}, abstract = {Presently, the focus of target detection is shifting towards the integration of information acquired from multiple sensors. When faced with a vast amount of data from various sensors, ensuring data security during transmission and storage in the cloud becomes a primary concern. Data files can be encrypted and stored in the cloud, and when the data are needed, the required files can be returned through ciphertext retrieval; searchable encryption technology makes this possible. However, the existing searchable encryption algorithms mainly ignore the data explosion problem in a cloud computing environment. The issue of authorised access under cloud computing has yet to be solved uniformly, resulting in a waste of computing power by data users when processing more and more data. Furthermore, to save computing resources, ECS (encrypted cloud storage) may only return a fragment of results in response to a search query, lacking a practical and universal verification mechanism. Therefore, this article proposes a lightweight, fine-grained searchable encryption scheme tailored to the cloud edge computing environment. We generate ciphertext and search trapdoors for terminal devices based on bilinear pairings and introduce access policies to restrict ciphertext search permissions, which improves the efficiency of ciphertext generation and retrieval. This scheme allows encryption and trapdoor generation to be performed on auxiliary terminal devices, with complex calculations carried out on edge devices. The resulting method ensures secure data access and fast search in multi-sensor network tracking, and accelerates computing speed while maintaining data security.
Ultimately, experimental comparisons and analyses demonstrate that the proposed method improves data retrieval efficiency by approximately 62%, reduces the storage overhead of the public key, ciphertext index, and verifiable searchable ciphertext by half, and effectively mitigates delays in data transmission and computation processes.}, } @article {pmid37406681, year = {2023}, author = {Li, J and Liu, Z}, title = {Sensor-based cloud computing data system and long distance running fatigue assessment.}, journal = {Preventive medicine}, volume = {173}, number = {}, pages = {107604}, doi = {10.1016/j.ypmed.2023.107604}, pmid = {37406681}, issn = {1096-0260}, mesh = {Humans ; *Cloud Computing ; Reproducibility of Results ; Data Systems ; *Running ; }, abstract = {Wireless sensor networks are widely used in sports training, medical and health care, smart home, environmental monitoring, cloud data and other fields because of their large scale, self-organization, reliability, dynamics, integration and data centralization. On this basis, this article conducts a comprehensive analysis and research on cloud computing data systems, and designs and implements a dynamic replication strategy. Since different users have different demands for different data at different times, it is necessary to record and analyze recent users' data access, so as to actively adjust the number and location of data blocks. Subsequently, a multi-source blockchain transmission method was proposed and implemented, which can significantly reduce the time cost of data migration and improve the overall performance of cloud storage data systems. Finally, the article provides an in-depth analysis of long-distance running fatigue. This study designs a simulated specialized exercise load experiment to reproduce the load characteristics of excellent athletes during mid- to long-distance running, in order to induce exercise fatigue in the main muscles of different parts of their bodies. At the same time, a joint amplitude-frequency analysis of the surface EMG signal changes during this process is carried out. This article conducts research on sensor-based cloud computing data systems and long-distance running fatigue assessment, promoting the development of cloud computing data systems and improving long-distance running fatigue assessment methods.}, } @article {pmid37398279, year = {2023}, author = {Huang, X and Struck, TJ and Davey, SW and Gutenkunst, RN}, title = {dadi-cli: Automated and distributed population genetic model inference from allele frequency spectra.}, journal = {bioRxiv : the preprint server for biology}, volume = {}, number = {}, pages = {}, pmid = {37398279}, issn = {2692-8205}, support = {R01 GM127348/GM/NIGMS NIH HHS/United States ; R35 GM149235/GM/NIGMS NIH HHS/United States ; }, abstract = {SUMMARY: dadi is a popular software package for inferring models of demographic history and natural selection from population genomic data. But using dadi requires Python scripting and manual parallelization of optimization jobs. We developed dadi-cli to simplify dadi usage and also enable straightforward distributed computing.

dadi-cli is implemented in Python and released under the Apache License 2.0. The source code is available at https://github.com/xin-huang/dadi-cli . dadi-cli can be installed via PyPI and conda, and is also available through Cacao on Jetstream2 https://cacao.jetstream-cloud.org/ .}, } @article {pmid37396881, year = {2023}, author = {Chaudhari, PB and Banga, A}, title = {Writing strategies for improving the access of medical literature.}, journal = {World journal of experimental medicine}, volume = {13}, number = {3}, pages = {50-58}, doi = {10.5493/wjem.v13.i3.50}, pmid = {37396881}, issn = {2220-315X}, abstract = {When conducting a literature review, medical authors typically search for relevant keywords in bibliographic databases or on search engines like Google. After selecting the most pertinent article based on the title's relevance and the abstract's content, they download or purchase the article and cite it in their manuscript. Three major elements influence whether an article will be cited in future manuscripts: the keywords, the title, and the abstract. This indicates that these elements are the "key dissemination tools" for research papers. If these three elements are not determined judiciously by authors, it may adversely affect the manuscript's retrievability, readability, and citation index, which can negatively impact both the author and the journal. In this article, we share our informed perspective on writing strategies to enhance the searchability and citation of medical articles. These strategies are adopted from the principles of search engine optimization, but they do not aim to cheat or manipulate the search engine. Instead, they adopt a reader-centric content writing methodology that targets well-researched keywords to the readers who are searching for them. Reputable journals, such as Nature and the British Medical Journal, emphasize "online searchability" in their author guidelines. We hope that this article will encourage medical authors to approach manuscript drafting from the perspective of "looking inside-out." In other words, they should not only draft manuscripts around what they want to convey to fellow researchers but also integrate what the readers want to discover. It is a call-to-action to better understand and engage search engine algorithms, so they yield information in a desired and self-learning manner because the "Cloud" is the new stakeholder.}, } @article {pmid37396052, year = {2023}, author = {Qureshi, R and Irfan, M and Gondal, TM and Khan, S and Wu, J and Hadi, MU and Heymach, J and Le, X and Yan, H and Alam, T}, title = {AI in drug discovery and its clinical relevance.}, journal = {Heliyon}, volume = {9}, number = {7}, pages = {e17575}, pmid = {37396052}, issn = {2405-8440}, abstract = {The COVID-19 pandemic has emphasized the need for novel drug discovery process. However, the journey from conceptualizing a drug to its eventual implementation in clinical settings is a long, complex, and expensive process, with many potential points of failure. Over the past decade, a vast growth in medical information has coincided with advances in computational hardware (cloud computing, GPUs, and TPUs) and the rise of deep learning. Medical data generated from large molecular screening profiles, personal health or pathology records, and public health organizations could benefit from analysis by Artificial Intelligence (AI) approaches to speed up and prevent failures in the drug discovery pipeline. 
We present applications of AI at various stages of drug discovery pipelines, including the inherently computational approaches of de novo design and prediction of a drug's likely properties. Open-source databases and AI-based software tools that facilitate drug design are discussed along with their associated problems of molecule representation, data collection, complexity, labeling, and disparities among labels. How contemporary AI methods, such as graph neural networks, reinforcement learning, and generative models, along with structure-based methods (e.g., molecular dynamics simulations and molecular docking), can contribute to drug discovery applications and analysis of drug responses is also explored. Finally, recent developments in and investments into AI-based start-up companies for biotechnology and drug design, along with their current progress and prospects, are discussed.}, } @article {pmid39664871, year = {2023}, author = {Park, M and Oh, N and Jung, YG}, title = {Digital Twins in Healthcare and Their Applicability in Rhinology: A Narrative Review.}, journal = {Journal of rhinology : official journal of the Korean Rhinologic Society}, volume = {30}, number = {2}, pages = {80-86}, pmid = {39664871}, issn = {2384-4361}, abstract = {Digital twins were initially introduced in the aerospace industry, but they have been applied to the medical field in the 2020s. The development of the Internet of Things, sensor technology, cloud computing, big data analysis, and simulation technology has made this idea feasible. Essentially, digital twins are virtual representations of real-world data that can generate virtual outcomes related to a patient based on their actual data. With this technology, doctors can predict treatment outcomes, plan surgery, and monitor patients' medical conditions in real time. While digital twins have endless potential, challenges include the need to deal with vast amounts of data and ensure the security of personal information. In the field of rhinology, which deals with complex anatomy from the sinus to the skull base, the adoption of digital twins is just beginning. Digital twins have begun to be incorporated into surgical navigation and the management of chronic diseases such as chronic rhinosinusitis. Despite the limitless potential of digital twins, challenges related to dealing with vast amounts of data and enhancing the security of personal data need to be surmounted for this method to be more widely applied.}, } @article {pmid37383277, year = {2022}, author = {Parks, DF and Voitiuk, K and Geng, J and Elliott, MAT and Keefe, MG and Jung, EA and Robbins, A and Baudin, PV and Ly, VT and Hawthorne, N and Yong, D and Sanso, SE and Rezaee, N and Sevetson, JL and Seiler, ST and Currie, R and Pollen, AA and Hengen, KB and Nowakowski, TJ and Mostajo-Radji, MA and Salama, SR and Teodorescu, M and Haussler, D}, title = {IoT cloud laboratory: Internet of Things architecture for cellular biology.}, journal = {Internet of things (Amsterdam, Netherlands)}, volume = {20}, number = {}, pages = {}, pmid = {37383277}, issn = {2542-6605}, support = {R01 MH120295/MH/NIMH NIH HHS/United States ; R01 NS118442/NS/NINDS NIH HHS/United States ; RM1 HG011543/HG/NHGRI NIH HHS/United States ; T32 HG008345/HG/NHGRI NIH HHS/United States ; }, abstract = {The Internet of Things (IoT) provides a simple framework to control online devices easily. IoT is now a commonplace tool used by technology companies but is rarely used in biology experiments.
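Unified lab-device architectures of the kind this entry goes on to describe typically rest on a publish/subscribe messaging pattern; a minimal hedged sketch using paho-mqtt, where the broker address, topic names, and the 1.x client API are assumptions rather than the authors' deployment:

```python
# Hedged sketch of IoT lab-device control via MQTT publish/subscribe, in the
# spirit of the architecture described in this entry; not the authors'
# implementation. Assumes the paho-mqtt 1.x client API.
import paho.mqtt.client as mqtt

def on_message(client, userdata, msg):
    # e.g., react to an incubator alarm or log a microscope status update
    print(msg.topic, msg.payload.decode())

client = mqtt.Client()
client.on_message = on_message
client.connect("broker.example.org", 1883)      # hypothetical broker
client.subscribe("lab/incubator/alarm")         # device -> web tool channel
client.publish("lab/pump1/setpoint", "2.5")     # web tool -> device command
client.loop_forever()                           # dispatch callbacks
```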
IoT can benefit cloud biology research through alarm notifications, automation, and the real-time monitoring of experiments. We developed an IoT architecture to control biological devices and implemented it in lab experiments. Lab devices for electrophysiology, microscopy, and microfluidics were created from the ground up to be part of a unified IoT architecture. The system allows each device to be monitored and controlled from an online web tool. We present our IoT architecture so other labs can replicate it for their own experiments.}, } @article {pmid37370966, year = {2023}, author = {Nancy, AA and Ravindran, D and Vincent, DR and Srinivasan, K and Chang, CY}, title = {Fog-Based Smart Cardiovascular Disease Prediction System Powered by Modified Gated Recurrent Unit.}, journal = {Diagnostics (Basel, Switzerland)}, volume = {13}, number = {12}, pages = {}, pmid = {37370966}, issn = {2075-4418}, support = {Intelligent Recognition Industry Service Research Center from the Featured Areas Research Center Program within the framework of the Higher Education Sprout Project//Ministry of Education, Taiwan/ ; MOST 109-2221-E-224-048-MY2//Ministry of Science and Technology,Taiwan/ ; }, abstract = {The ongoing fast-paced technology trend has brought forth ceaseless transformation. In this regard, cloud computing has long proven to be the paramount deliverer of services such as computing power, software, networking, storage, and databases on a pay-per-use basis. The cloud is a big proponent of the internet of things (IoT), furnishing the computation and storage requisite to address internet-of-things applications. With the proliferating IoT devices triggering a continual data upsurge, the cloud-IoT interaction encounters latency, bandwidth, and connectivity restraints. The inclusion of the decentralized and distributed fog computing layer amidst the cloud and IoT layer extends the cloud's processing, storage, and networking services close to end users. This hierarchical edge-fog-cloud model distributes computation and intelligence, yielding optimal solutions while tackling constraints like massive data volume, latency, delay, and security vulnerability. The healthcare domain, warranting time-critical functionalities, can reap benefits from the cloud-fog-IoT interplay. This research paper propounded a fog-assisted smart healthcare system to diagnose heart or cardiovascular disease. It combined a fuzzy inference system (FIS) with the gated recurrent unit (GRU), a recurrent neural network variant, for pre-processing and predictive analytics tasks. The proposed system showcases substantially improved performance results, with classification accuracy at 99.125%. With the bulk of healthcare data analytics happening at the fog layer, the proposed work shows improved latency, response time, and jitter compared to cloud-only processing. Deep learning models are adept at handling sophisticated tasks, particularly predictive analytics.
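A minimal sketch of a GRU-based risk classifier of the kind a fog layer could host; layer sizes, input shapes, and data are illustrative assumptions, not the paper's tuned FIS-GRU model:

```python
# Hedged sketch: a GRU over windows of vital-sign time series producing a
# binary cardiovascular-risk probability. Synthetic placeholder data.
import numpy as np
import tensorflow as tf

X = np.random.rand(1000, 20, 8).astype("float32")   # 1,000 patients x 20 steps x 8 features
y = np.random.randint(0, 2, size=(1000, 1))         # 1 = at-risk label (synthetic)

model = tf.keras.Sequential([
    tf.keras.layers.GRU(32, input_shape=(20, 8)),    # recurrent feature extractor
    tf.keras.layers.Dense(1, activation="sigmoid"),  # risk probability
])
model.compile(optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"])
model.fit(X, y, epochs=2, batch_size=64, verbose=0)
```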
Time-critical healthcare applications reap benefits from deep learning's exclusive potential to furnish near-perfect results, coupled with the merits of the decentralized fog model, as revealed by the experimental results.}, } @article {pmid37369699, year = {2023}, author = {Mateo-Garcia, G and Veitch-Michaelis, J and Purcell, C and Longepe, N and Reid, S and Anlind, A and Bruhn, F and Parr, J and Mathieu, PP}, title = {In-orbit demonstration of a re-trainable machine learning payload for processing optical imagery.}, journal = {Scientific reports}, volume = {13}, number = {1}, pages = {10391}, pmid = {37369699}, issn = {2045-2322}, support = {PID2019-109026RB-I00, ERDF//Ministerio de Ciencia e Innovación/ ; }, abstract = {Cognitive cloud computing in space (3CS) describes a new frontier of space innovation powered by Artificial Intelligence, enabling an explosion of new applications in observing our planet and enabling deep space exploration. In this framework, machine learning (ML) payloads-isolated software capable of extracting high level information from onboard sensors-are key to accomplish this vision. In this work we demonstrate, in a satellite deployed in orbit, a ML payload called 'WorldFloods' that is able to send compressed flood maps from sensed images. In particular, we perform a set of experiments to: (1) compare different segmentation models on different processing variables critical for onboard deployment, (2) show that we can produce, onboard, vectorised polygons delineating the detected flood water from a full Sentinel-2 tile, (3) retrain the model with few images of the onboard sensor downlinked to Earth and (4) demonstrate that this new model can be uplinked to the satellite and run on new images acquired by its camera. Overall our work demonstrates that ML-based models deployed in orbit can be updated if new information is available, paving the way for agile integration of onboard and onground processing and "on the fly" continuous learning.}, } @article {pmid37362704, year = {2023}, author = {Mahajan, HB and Junnarkar, AA}, title = {Smart healthcare system using integrated and lightweight ECC with private blockchain for multimedia medical data processing.}, journal = {Multimedia tools and applications}, volume = {}, number = {}, pages = {1-24}, pmid = {37362704}, issn = {1380-7501}, abstract = {Cloud-based Healthcare 4.0 systems have research challenges with secure medical data processing, especially biomedical image processing with privacy protection. Medical records are generally text/numerical or multimedia. Multimedia data includes X-ray scans, Computed Tomography (CT) scans, Magnetic Resonance Imaging (MRI) scans, etc. Transferring biomedical multimedia data to medical authorities raises various security concerns. This paper proposes a one-of-a-kind blockchain-based secure biomedical image processing system that maintains anonymity. The integrated Healthcare 4.0 assisted multimedia image processing architecture includes an edge layer, fog computing layer, cloud storage layer, and blockchain layer. The edge layer collects and sends periodic medical information from the patient to the higher layer. The multimedia data from the edge layer is securely preserved in blockchain-assisted cloud storage through fog nodes using lightweight cryptography. Medical users then safely search such data for medical treatment or monitoring. Lightweight cryptographic procedures are proposed by employing Elliptic Curve Cryptography (ECC) with Elliptic Curve Diffie-Hellman (ECDH) key agreement and the Elliptic Curve Digital Signature Algorithm (ECDSA) to secure biomedical image processing while maintaining privacy.
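This ECDH-plus-ECDSA combination can be sketched with the pyca/cryptography package; the curve choice and payload below are assumptions, and the snippet is an illustration of the technique rather than the paper's implementation:

```python
# Hedged sketch: ECDH key agreement to derive a symmetric key, plus ECDSA
# signing of a medical image blob so the cloud can verify its origin.
from cryptography.hazmat.primitives.asymmetric import ec
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.kdf.hkdf import HKDF

# Each party generates an ECC key pair on the P-256 curve (assumed choice).
edge_key = ec.generate_private_key(ec.SECP256R1())
fog_key = ec.generate_private_key(ec.SECP256R1())

# ECDH: both sides derive the same shared secret from the peer's public key.
shared = edge_key.exchange(ec.ECDH(), fog_key.public_key())
aes_key = HKDF(algorithm=hashes.SHA256(), length=32, salt=None,
               info=b"image-encryption").derive(shared)  # symmetric key material

# ECDSA: the edge node signs the (encrypted) image; the receiver verifies it.
image_blob = b"...encrypted X-ray bytes..."  # placeholder payload
signature = edge_key.sign(image_blob, ec.ECDSA(hashes.SHA256()))
edge_key.public_key().verify(signature, image_blob, ec.ECDSA(hashes.SHA256()))
```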
The proposed technique is experimented with using publicly available chest X-ray and CT images. The experimental results revealed that the proposed model shows higher computational efficiency (encryption and decryption time), Peak Signal-to-Noise Ratio (PSNR), and Mean Square Error (MSE).}, } @article {pmid37361469, year = {2023}, author = {Ansari, M and Alam, M}, title = {An Intelligent IoT-Cloud-Based Air Pollution Forecasting Model Using Univariate Time-Series Analysis.}, journal = {Arabian journal for science and engineering}, volume = {}, number = {}, pages = {1-28}, pmid = {37361469}, issn = {2193-567X}, abstract = {Air pollution is a significant environmental issue affecting public health and ecosystems worldwide, resulting from various sources such as industrial activities, vehicle emissions, and fossil fuel burning. Air pollution contributes to climate change and can cause several health problems, such as respiratory illnesses, cardiovascular disease, and cancer. A potential solution to this problem has been proposed by using different artificial intelligence (AI) and time-series models. These models are implemented in the cloud environment to forecast the Air Quality Index (AQI) utilizing Internet of things (IoT) devices. The recent influx of IoT-enabled time-series air pollution data poses challenges for traditional models. Various approaches have been explored to forecast AQI in the cloud environment using IoT devices. The primary objective of this study is to assess the efficacy of an IoT-Cloud-based model for forecasting the AQI under different meteorological conditions. To achieve this, we proposed a novel BO-HyTS approach that combines seasonal autoregressive integrated moving average (SARIMA) and long short-term memory (LSTM) and fine-tuned it by using Bayesian optimization to predict air pollution levels. The proposed BO-HyTS model can capture both linear and nonlinear characteristics of the time-series data, thus augmenting the accuracy of the forecasting process. Additionally, several AQI forecasting models, including classical time-series, machine learning, and deep learning, are employed to forecast air quality from time-series data. Five statistical evaluation metrics are incorporated to evaluate the effectiveness of models. Because directly comparing the various algorithms is difficult, a non-parametric statistical significance test (the Friedman test) is applied to assess the performance of the different machine learning, time-series, and deep learning models. The findings reveal that the proposed BO-HyTS model produced significantly better results than its competitors, providing the most accurate and efficient forecasting model, with an MSE of 632.200, RMSE of 25.14, Med AE of 19.11, Max Error of 51.52, and MAE of 20.49. The results of this study provide insights into the future patterns of AQI in various Indian states and set a standard for these states as governments develop their healthcare policies accordingly.
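A minimal sketch of the SARIMA-plus-LSTM hybrid idea behind BO-HyTS; the orders, seasonal period, and data below are illustrative assumptions, not the tuned values from the study:

```python
# Hedged sketch: SARIMA models the linear/seasonal component of an AQI
# series; an LSTM would model the residuals; Bayesian optimization would
# search the joint hyperparameter space. Synthetic data, assumed settings.
import numpy as np
from statsmodels.tsa.statespace.sarimax import SARIMAX

aqi = 100 + 30 * np.sin(np.arange(500) * 2 * np.pi / 24) + np.random.randn(500) * 5

sarima = SARIMAX(aqi, order=(1, 1, 1), seasonal_order=(1, 0, 1, 24)).fit(disp=False)
residuals = aqi - sarima.fittedvalues        # nonlinear remainder for an LSTM
linear_forecast = sarima.forecast(steps=24)  # linear part of the 24-step forecast
# An LSTM (e.g., tf.keras.layers.LSTM) trained on lagged windows of
# `residuals` would supply the nonlinear correction added to this forecast.
print(linear_forecast[:5])
```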
The proposed BO-HyTS model has the potential to inform policy decisions and enable governments and organizations to better protect and manage the environment proactively.}, } @article {pmid37361138, year = {2023}, author = {Li, X and Pan, L and Liu, S}, title = {A DRL-based online VM scheduler for cost optimization in cloud brokers.}, journal = {World wide web}, volume = {}, number = {}, pages = {1-27}, pmid = {37361138}, issn = {1573-1413}, abstract = {The virtual machine (VM) scheduling problem in cloud brokers that support cloud bursting is fraught with uncertainty due to the on-demand nature of Infrastructure as a Service (IaaS) VMs. Until a VM request is received, the scheduler does not know in advance when it will arrive or what configurations it demands. Even when a VM request is received, the scheduler does not know when the VM's lifecycle expires. Existing studies have begun to use deep reinforcement learning (DRL) to solve such scheduling problems. However, they do not address how to guarantee the QoS of user requests. In this paper, we investigate a cost optimization problem for online VM scheduling in cloud brokers for cloud bursting to minimize the cost spent on public clouds while satisfying specified QoS restrictions. We propose DeepBS, a DRL-based online VM scheduler in a cloud broker which learns from experience to adaptively improve scheduling strategies in environments with non-smooth and uncertain user requests. We evaluate the performance of DeepBS under two request arrival patterns which are respectively based on Google and Alibaba cluster traces, and the experiments show that DeepBS has a significant advantage over other benchmark algorithms in terms of cost optimization.}, } @article {pmid37361100, year = {2023}, author = {Pandey, NK and Kumar, K and Saini, G and Mishra, AK}, title = {Security issues and challenges in cloud of things-based applications for industrial automation.}, journal = {Annals of operations research}, volume = {}, number = {}, pages = {1-20}, pmid = {37361100}, issn = {0254-5330}, abstract = {Due to the COVID-19 outbreak, industries have pushed toward contactless processing in computing technologies and industrial automation. Cloud of Things (CoT) is one of the emerging computing technologies for such applications. CoT combines the most emerging cloud computing and the Internet of Things. Developments in industrial automation have made the two highly interdependent, because cloud computing works as a backbone for IoT technology. It supports data storage, analytics, processing, commercial application development, deployment, and security compliance. The amalgamation of cloud technologies with IoT is making utilities more useful, smart, service-oriented, and secure for the sustainable development of industrial processes. As the pandemic has increased remote access to computing utilities, cyber-attacks have increased exponentially. This paper reviews the CoT's contribution to industrial automation and the various security features provided by different tools and applications used for the circular economy. An in-depth analysis of security threats, and of the features addressing these security issues in traditional and non-traditional CoT platforms used in industrial automation, has been carried out.
The security issues and challenges faced by IIoT and AIoT in industrial automation have also been addressed.}, } @article {pmid37360775, year = {2023}, author = {Lucia, C and Zhiwei, G and Michele, N}, title = {Biometrics for Industry 4.0: a survey of recent applications.}, journal = {Journal of ambient intelligence and humanized computing}, volume = {}, number = {}, pages = {1-23}, pmid = {37360775}, issn = {1868-5137}, abstract = {The Fourth Industrial Revolution, also known as Industry 4.0, represents the rise of digital industrial technology that is propagating at an exponential rate compared to the previous three revolutions. Interoperability is a basis of production, where there is a continuous exchange of information between machines and production units that act autonomously and intelligently. Workers play a central role in making autonomous decisions and using advanced technological tools. It may involve using measures that distinguish individuals and their behaviours and reactions. Increasing the level of security, allowing only authorized personnel access to designated areas, and promoting worker welfare can have a positive impact on the entire assembly line. Thus, capturing biometric information, with or without individuals' knowledge, could allow identity verification and monitoring of their emotional and cognitive states during the daily actions of work life. From the study of the literature, we outline three macro categories in which the principles of Industry 4.0 are merged and the functionalities of biometric systems are exploited: security, health monitoring, and quality work life analysis. In this review, we present an overview of all biometric features used in the context of Industry 4.0 with a focus on their advantages, limitations, and practical use. Attention is also paid to future research directions for which new answers are being explored.}, } @article {pmid37360142, year = {2023}, author = {Hemdan, EE and El-Shafai, W and Sayed, A}, title = {Integrating Digital Twins with IoT-Based Blockchain: Concept, Architecture, Challenges, and Future Scope.}, journal = {Wireless personal communications}, volume = {}, number = {}, pages = {1-24}, pmid = {37360142}, issn = {0929-6212}, abstract = {In recent years, the Digital Twin (DT) has drawn concentrated attention from researchers and companies due to advancements in IT, communication systems, Cloud Computing, Internet-of-Things (IoT), and Blockchain. The main concept of the DT is to provide a comprehensive, tangible, and operational description of any element, asset, or system. However, a digital twin is an extremely dynamic construct whose complexity grows over the life cycle, producing enormous quantities of data and information. Likewise, with the development of Blockchain, digital twins could be a key strategy for supporting IoT-based applications that transfer data and value onto the Internet with full transparency, promising accessibility, trusted traceability, and immutability of transactions. Therefore, the integration of digital twins with IoT and blockchain technologies has the potential to revolutionize various industries by providing enhanced security, transparency, and data integrity. Thus, this work presents a survey on the innovative theme of digital twins with the integration of Blockchain for various applications. It also provides challenges and future research directions on this subject.
In addition, in this paper, we propose a concept and architecture for integrating digital twins with IoT-based blockchain archives, which allows for real-time monitoring and control of physical assets and processes in a secure and decentralized manner. We also discuss the challenges and limitations of this integration, including issues related to data privacy, scalability, and interoperability. Finally, we provide insights into the future scope of this technology and discuss potential research directions for further improving the integration of digital twins with IoT-based blockchain archives. Overall, this paper provides a comprehensive overview of the potential benefits and challenges of integrating digital twins with IoT-based blockchain and lays the foundation for future research in this area.}, } @article {pmid37360131, year = {2023}, author = {Gupta, A and Singh, A}, title = {Prediction Framework on Early Urine Infection in IoT-Fog Environment Using XGBoost Ensemble Model.}, journal = {Wireless personal communications}, volume = {}, number = {}, pages = {1-19}, pmid = {37360131}, issn = {0929-6212}, abstract = {Urine infections are one of the most prevalent concerns for the healthcare industry, as they may impair the functioning of the kidney and other renal organs. As a result, early diagnosis and treatment of such infections are essential to avert any future complications. Conspicuously, in the current work, an intelligent system for the early prediction of urine infections has been presented. The proposed framework uses IoT-based sensors for data collection, followed by data encoding and infectious risk factor computation using the XGBoost algorithm over the fog computing platform. Finally, the analysis results along with the health-related information of users are stored in the cloud repository for future analysis. For performance validation, extensive experiments have been carried out, and results are calculated based on real-time patient data. The statistical findings of accuracy (91.45%), specificity (95.96%), sensitivity (84.79%), precision (95.49%), and f-score (90.12%) reveal the significantly improved performance of the proposed strategy over other baseline techniques.}, } @article {pmid37358302, year = {2023}, author = {Castronova, AM and Nassar, A and Knoben, W and Fienen, MN and Arnal, L and Clark, M}, title = {Community Cloud Computing Infrastructure to Support Equitable Water Research and Education.}, journal = {Ground water}, volume = {61}, number = {5}, pages = {612-616}, doi = {10.1111/gwat.13337}, pmid = {37358302}, issn = {1745-6584}, support = {1849458//Division of Earth Sciences/ ; }, mesh = {*Cloud Computing ; *Groundwater ; Software ; }, } @article {pmid37352938, year = {2023}, author = {Wang, D}, title = {Internet of things sports information collection and sports action simulation based on cloud computing data platform.}, journal = {Preventive medicine}, volume = {173}, number = {}, pages = {107579}, doi = {10.1016/j.ypmed.2023.107579}, pmid = {37352938}, issn = {1096-0260}, mesh = {Humans ; *Cloud Computing ; *Internet of Things ; Computer Simulation ; Computers ; Models, Theoretical ; Internet ; }, abstract = {In recent years, cloud computing technology has shown exponential growth, and the upgrading of hardware and the improvement of computing performance have brought significant changes to the Internet of Things industry.
As times change and new demands emerge, data platforms built on cloud computing must adapt accordingly. Among these demands, the construction of cross-regional data centers is particularly important, especially in commercial environments. How to reduce the cost of data centers on cloud computing platforms while ensuring business quality has become a crucial issue. Against this background, this article optimizes the bandwidth cost of the data center and addresses the problem of big data transmission using delayed big data windows and multi-delay windows. A mathematical model for optimizing bandwidth cost under multi-delay windows is proposed. This article also studies sports action simulation, which plays an important role in sports research, film animation, and virtual reality. Simulated actions are usually implemented based on data capture methods. However, these methods typically do not have interactivity with the environment. To enhance the authenticity and interactive ability of simulation action information collection, this article adopts a reinforcement learning method for training and design and applies it in a system for collecting and processing human motion data. This article applies cloud computing data platforms and sports information collection to sports action simulation, advancing the development of sports action simulation.}, } @article {pmid37346721, year = {2023}, author = {Zhang, X}, title = {Optimization design of railway logistics center layout based on mobile cloud edge computing.}, journal = {PeerJ. Computer science}, volume = {9}, number = {}, pages = {e1298}, pmid = {37346721}, issn = {2376-5992}, abstract = {With economic development, railway freight transportation has become increasingly important. The efficiency of a railway logistics center depends on the types, quantities, information exchange, and layout optimization. Cloud-edge collaboration technology can combine cloud computing's rich computing and storage resources with the low latency of edge computing. It can also provide additional computing power and meet the real-time requirements of intelligent railway logistics construction. However, cloud-edge collaboration introduces wireless communication delay between the mobile terminal and the edge computing server. We designed a two-tier offloading strategy algorithm and solved the optimization problem by determining the offloading decision of each task. The cost of every task is calculated for on-board device computation, vehicular edge computing (VEC), and cloud server computation. Simulation results show that the proposed method can reduce time delay by about 40% compared to other offloading strategies.}, } @article {pmid37346641, year = {2023}, author = {Abd Rahman, NH and Mohamad Zaki, MH and Hasikin, K and Abd Razak, NA and Ibrahim, AK and Lai, KW}, title = {Predicting medical device failure: a promise to reduce healthcare facilities cost through smart healthcare management.}, journal = {PeerJ. Computer science}, volume = {9}, number = {}, pages = {e1279}, pmid = {37346641}, issn = {2376-5992}, abstract = {BACKGROUND: The advancement of biomedical research generates myriad healthcare-relevant data, including medical records and medical device maintenance information. The COVID-19 pandemic significantly affects the global mortality rate, creating an enormous demand for medical devices.
As information technology has advanced, the concept of intelligent healthcare has steadily gained prominence. Smart healthcare utilises a new generation of information technologies, such as the Internet of Things (IoT), big data, cloud computing, and artificial intelligence, to completely transform the traditional medical system. With the intention of presenting the concept of smart healthcare, a predictive model is proposed to predict medical device failure for intelligent management of healthcare services.

METHODS: Present healthcare device management can be improved by proposing a predictive machine learning model that predicts the tendency of medical devices to fail, in support of smart healthcare. The predictive model is developed based on 8,294 critical medical devices from 44 different types of equipment extracted from 15 healthcare facilities in Malaysia. The model classifies each device into three classes: (i) class 1, where the device is unlikely to fail within the first 3 years of purchase, (ii) class 2, where the device is likely to fail within 3 years from the purchase date, and (iii) class 3, where the device is likely to fail more than 3 years after purchase. The goal is to establish a precise maintenance schedule and reduce maintenance and resource costs based on the time to the first failure event. Machine learning and deep learning techniques were compared, and the most robust model for smart healthcare was proposed.
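A minimal sketch of the three-class failure classifier described above, using a scikit-learn ensemble; the features, labels, and data are synthetic placeholders, not the study's 8,294-device dataset:

```python
# Hedged sketch: an ensemble classifier assigning devices to the three
# failure-timing classes described in the METHODS paragraph.
import numpy as np
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split

X = np.random.rand(1000, 10)              # e.g., device type, age, usage (assumed features)
y = np.random.randint(1, 4, size=1000)    # classes 1-3 as defined above (synthetic)
X_tr, X_te, y_tr, y_te = train_test_split(X, y, test_size=0.2, random_state=0)

clf = RandomForestClassifier(n_estimators=200, random_state=0).fit(X_tr, y_tr)
print("held-out accuracy:", clf.score(X_te, y_te))
```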

RESULTS: This study compares five machine learning algorithms and three deep learning optimizers. The best-optimized predictive models are based on an ensemble classifier for machine learning and the SGDM optimizer for deep learning, respectively. The ensemble classifier model produces 77.90%, 87.60%, and 75.39% for accuracy, specificity, and precision, compared to 70.30%, 83.71%, and 67.15% for the deep learning models. The ensemble classifier model improves to 79.50%, 88.36%, and 77.43% for accuracy, specificity, and precision after significant features are identified. The results show that although machine learning achieves better accuracy than deep learning, it requires more training time: 11.49 min versus 1 min 5 s for deep learning. Model accuracy could be further improved by introducing unstructured data from maintenance notes; this is left as future work because dealing with text data is time-consuming. The proposed model has proven to improve the devices' maintenance strategy, with a cost reduction of approximately Malaysian Ringgit (MYR) 326,330.88 per year. Therefore, the maintenance cost would drastically decrease if this smart predictive model were included in the healthcare management system.}, } @article {pmid37346547, year = {2023}, author = {Zhang, Y}, title = {A novel fast pedestrian recognition algorithm based on point cloud compression and boundary extraction.}, journal = {PeerJ. Computer science}, volume = {9}, number = {}, pages = {e1426}, pmid = {37346547}, issn = {2376-5992}, abstract = {REASON: Pedestrian recognition has great practical value and is a vital step toward applying path planning and intelligent obstacle avoidance in autonomous driving. In recent years, laser radar has played an essential role in pedestrian detection and recognition in unmanned driving. More accurate, high-dimensional, high-resolution data can be obtained by building a three-dimensional point cloud. However, the point cloud data collected by laser radar are often massive and contain a lot of redundancy, which is not conducive to transmission and storage, so processing is slow when the original point cloud data are used for recognition. Compressing the massive laser radar point clouds can therefore save computing power and speed up recognition.

METHODOLOGY: The article utilizes fused point cloud data from laser radar to investigate a fast pedestrian recognition algorithm. The focus is to compress the collected point cloud data based on boundary and feature value extraction, and then use an image-mapping-based point cloud pedestrian recognition algorithm to detect pedestrians. This article proposes a point cloud data compression method based on feature point extraction and a reduced voxel grid.
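A minimal sketch of the reduced-voxel-grid step; the companion boundary/feature-point extraction is omitted, and the voxel size and data are assumptions:

```python
# Hedged sketch of voxel-grid point cloud reduction: keep one centroid per
# occupied voxel. The paper's method additionally preserves extracted
# boundary/feature points, which this toy version omits.
import numpy as np

def voxel_downsample(points: np.ndarray, voxel_size: float) -> np.ndarray:
    keys = np.floor(points / voxel_size).astype(np.int64)   # voxel index per point
    _, inverse = np.unique(keys, axis=0, return_inverse=True)
    inverse = inverse.ravel()                               # point -> voxel group id
    counts = np.bincount(inverse).astype(float)
    out = np.empty((counts.size, 3))
    for dim in range(3):                                    # centroid per voxel
        out[:, dim] = np.bincount(inverse, weights=points[:, dim]) / counts
    return out

cloud = np.random.rand(100_000, 3) * 50.0                   # placeholder LiDAR points (m)
print(voxel_downsample(cloud, voxel_size=0.5).shape)        # far fewer rows than input
```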

RESULTS: The Karlsruhe Institute of Technology and Toyota Technological Institute (KITTI) dataset is used to evaluate the proposed algorithm experimentally. The outcomes indicate that the peak signal-to-noise ratio of the compression algorithm is improved by 6.02%. The recognition accuracy is improved by 16.93%, 17.2%, and 16.12% for simple, medium, and difficult scenes, respectively, when compared with the image-mapping-based point cloud pedestrian recognition method that uses random sampling to compress the point cloud data.

CONCLUSION: The proposed method achieves better data compression while ensuring that many feature points are retained in the compressed Point Cloud Data (PCD). Thus, the compressed PCD achieves pedestrian recognition through an image-based mapping recognition algorithm.}, } @article {pmid37346511, year = {2023}, author = {Zhou, J and Liu, B and Gao, J}, title = {A task scheduling algorithm with deadline constraints for distributed clouds in smart cities.}, journal = {PeerJ. Computer science}, volume = {9}, number = {}, pages = {e1346}, pmid = {37346511}, issn = {2376-5992}, abstract = {Computing technologies and 5G are helpful for the development of smart cities. Cloud computing has become an essential smart city technology. With artificial intelligence technologies, it can be used to integrate data from various devices, such as sensors and cameras, over the network in a smart city for management of the infrastructure and processing of Internet of Things (IoT) data. Cloud computing platforms provide services to users. Task scheduling in the cloud environment is an important technology to shorten computing time and reduce user cost, and thus has many important applications. Recently, a hierarchical distributed cloud service network model for the smart city has been proposed where distributed (micro) clouds and core clouds are considered to achieve a better network architecture. Task scheduling in the model has attracted many researchers. In this article, we study a task scheduling problem with deadline constraints in the distributed cloud model and aim to reduce the communication network's data load and provide low-latency services from the cloud server in the local area, hence promoting the efficiency of cloud computing services for local users. To solve the task scheduling problem efficiently, we present a local search algorithm. In the algorithm, a greedy search strategy is proposed to improve the current solutions iteratively. Moreover, randomized methods are used in selecting tasks and virtual machines for reassigning tasks. We carried out extensive computational experiments to evaluate the performance of our algorithm and compared experimental results with population-based approaches such as GA and PSO. The comparative results show that the proposed local search algorithm performs better than the comparative algorithms on the task scheduling problem.}, } @article {pmid37342652, year = {2023}, author = {Nguyen, T and Bian, X and Roberson, D and Khanna, R and Chen, Q and Yan, C and Beck, R and Worman, Z and Meerzaman, D}, title = {Multi-omics Pathways Workflow (MOPAW): An Automated Multi-omics Workflow on the Cancer Genomics Cloud.}, journal = {Cancer informatics}, volume = {22}, number = {}, pages = {11769351231180992}, pmid = {37342652}, issn = {1176-9351}, abstract = {INTRODUCTION: In the era of big data, gene-set pathway analyses derived from multi-omics are exceptionally powerful. When preparing and analyzing high-dimensional multi-omics data, the installation process and programming skills required to use existing tools can be challenging. This is especially the case for those who are not familiar with coding. In addition, implementation with high performance computing solutions is required to run these tools efficiently.

METHODS: We introduce an automatic multi-omics pathway workflow, a point-and-click graphical user interface to Multivariate Single Sample Gene Set Analysis (MOGSA), hosted on the Cancer Genomics Cloud by Seven Bridges Genomics. This workflow leverages the combination of different tools to perform data preparation for each given data type, dimensionality reduction, and MOGSA pathway analysis. The omics data include copy number alteration, transcriptomics, proteomics, and phosphoproteomics data. We have also provided an additional workflow to help with downloading data from The Cancer Genome Atlas and Clinical Proteomic Tumor Analysis Consortium and preprocessing these data to be used for this multi-omics pathway workflow.

RESULTS: The main outputs of this workflow are the distinct pathways for user-specified subgroups of interest, displayed in heatmaps when identified. In addition, graphs and tables are provided to users for review.

CONCLUSION: Multi-omics Pathway Workflow requires no coding experience. Users can bring their own data or download and preprocess public datasets from The Cancer Genome Atlas and Clinical Proteomic Tumor Analysis Consortium using our additional workflow based on the samples of interest. Distinct overactivated or deactivated pathways for groups of interest can be found. This useful information is important in effective therapeutic targeting.}, } @article {pmid37338245, year = {2024}, author = {Hotchkiss, J and Ridderman, E and Buftin, W}, title = {Overall US Hospice Quality According to Decedent Caregivers-Natural Language Processing and Sentiment Analysis of 3389 Online Caregiver Reviews.}, journal = {The American journal of hospice & palliative care}, volume = {41}, number = {5}, pages = {527-544}, doi = {10.1177/10499091231185593}, pmid = {37338245}, issn = {1938-2715}, mesh = {Aged ; Humans ; United States ; *Hospice Care/psychology ; *Hospices ; Caregivers/psychology ; Sentiment Analysis ; Natural Language Processing ; Medicare ; Pain ; }, abstract = {Objectives: With an untapped quality resource in online hospice reviews, the study aims were to explore hospice caregiver experiences and assess their expectations of the hospice Medicare benefit. Methods: Topical and sentiment analysis of Google and Yelp caregiver reviews (n = 3393) posted between 2013 and 2023 was conducted using Google natural language processing (NLP). Stratified sampling was weighted by hospice size to approximate the daily census of US hospice enrollees. Results: Overall caregiver sentiment of hospice care was neutral (S = .14). The domains of therapeutic, achievable expectations and of misperceptions and unachievable expectations were, respectively, the most and least prevalent. The four most prevalent topics all had moderately positive sentiments: caring staff; staff professionalism and knowledge; emotional, spiritual, and bereavement support; and responsive, timely, or helpful care. The lowest sentiment scores were for lack of staffing; promises made but not kept; pain, symptoms, and medications; sped-up, hastened, or sedated death; and money or staff motivations. Significance of Results: Caregivers' overall rating of hospice was neutral, largely due to moderate sentiment on achievable expectations in two-thirds of reviews mixed with unachievable expectations in one-sixth of reviews. Hospice caregivers were most likely to recommend hospices with caring staff that provided quality care, responded to requests, and offered family support. Lack of staffing and inadequate pain and symptom management were the two biggest barriers to hospice quality. All eight CAHPS measures were found in the discovered review topics. Close-ended CAHPS scores and open-ended online reviews offer complementary insights.
Future research should explore associations between CAHPS and review insights.}, } @article {pmid37334641, year = {2023}, author = {Schnabel, B and Gebert, J and Schneider, R and Helwig, P}, title = {Towards the simulation of bone-implant systems with a stratified material model.}, journal = {Technology and health care : official journal of the European Society for Engineering and Medicine}, volume = {31}, number = {4}, pages = {1555-1566}, doi = {10.3233/THC-237001}, pmid = {37334641}, issn = {1878-7401}, mesh = {Humans ; Computer Simulation ; *Femur ; Finite Element Analysis ; *Image Processing, Computer-Assisted ; Reproducibility of Results ; }, abstract = {BACKGROUND: The clinical performance of medical devices is becoming increasingly important for the requirements of modern development processes and the associated regulations. However, the evidence for this performance can often only be obtained very late in the development process via clinical trials or studies.

OBJECTIVE: The purpose of the presented work is to show that the simulation of bone-implant systems has advanced in various aspects, including cloud-based execution, Virtual Clinical Trials, and material modeling, to a point where widespread utilization in healthcare for procedure planning and enhancing practices seems feasible. But this will only hold true if the virtual cohort data built from clinical Computed Tomography data are collected and analysed with care.

METHODS: An overview of the principal steps necessary to perform Finite Element Method-based structural mechanical simulations of bone-implant systems based on clinical imaging data is presented. Since these data form the baseline for virtual cohort construction, we present an enhancement method to make them more accurate and reliable.
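One concrete step in such a pipeline is mapping clinical CT intensities to a material model; a minimal sketch using a literature-style density-modulus power law, where the calibration coefficients are assumptions and not the paper's values:

```python
# Hedged sketch: Hounsfield units -> apparent density -> elastic modulus,
# a common preprocessing step for bone-implant FEM. Coefficients are
# illustrative (Morgan-style power law), not the authors' calibration.
import numpy as np

def hu_to_modulus(hu: np.ndarray) -> np.ndarray:
    rho = 0.0008 * hu + 1.0            # g/cm^3, assumed linear HU->density calibration
    rho = np.clip(rho, 0.01, None)     # guard against non-physical densities
    return 6850.0 * rho ** 1.49        # MPa, density->modulus power law (assumed)

ct_slice = np.random.randint(-100, 1500, size=(64, 64)).astype(float)  # placeholder HU
E = hu_to_modulus(ct_slice)            # element-wise modulus map for meshing
print(E.min(), E.max())
```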

RESULTS: The findings of our work comprise the initial step towards a virtual cohort for the evaluation of proximal femur implants. In addition, results of our proposed enhancement methodology for clinical Computed Tomography data that demonstrate the necessity of using multiple image reconstructions are presented.

CONCLUSION: Simulation methodologies and pipelines nowadays are mature and have turnaround times that allow for day-to-day use. However, small changes in the imaging and the preprocessing of data can have a significant impact on the obtained results. Consequently, first steps towards virtual clinical trials, like collecting bone samples, have been taken, but the reliability of the input data remains subject to further research and development.}, } @article {pmid37328705, year = {2023}, author = {Yang, M and Bo, Z and Xu, T and Xu, B and Wang, D and Zheng, H}, title = {Uni-GBSA: an open-source and web-based automatic workflow to perform MM/GB(PB)SA calculations for virtual screening.}, journal = {Briefings in bioinformatics}, volume = {24}, number = {4}, pages = {}, doi = {10.1093/bib/bbad218}, pmid = {37328705}, issn = {1477-4054}, mesh = {Workflow ; Entropy ; *Molecular Dynamics Simulation ; *Drug Discovery ; Ligands ; Internet ; Protein Binding ; }, abstract = {Binding free energy calculation of a ligand to a protein receptor is a fundamental objective in drug discovery. Molecular mechanics/Generalized-Born (Poisson-Boltzmann) surface area (MM/GB(PB)SA) is one of the most popular methods for binding free energy calculations. It is more accurate than most scoring functions and more computationally efficient than alchemical free energy methods. Several open-source tools for performing MM/GB(PB)SA calculations have been developed, but they have limitations and high entry barriers to users. Here, we introduce Uni-GBSA, a user-friendly automatic workflow to perform MM/GB(PB)SA calculations, which can perform topology preparation, structure optimization, binding free energy calculation and parameter scanning for MM/GB(PB)SA calculations. It also offers a batch mode that evaluates thousands of molecules against one protein target in parallel for efficient application in virtual screening. The default parameters are selected after systematic testing on the PDBBind-2011 refined dataset. In our case studies, Uni-GBSA produced a satisfactory correlation with the experimental binding affinities and outperformed AutoDock Vina in molecular enrichment. Uni-GBSA is available as an open-source package at https://github.com/dptech-corp/Uni-GBSA. It can also be accessed for virtual screening from the Hermite web platform at https://hermite.dp.tech. A free lab version of the Uni-GBSA web server is available at https://labs.dp.tech/projects/uni-gbsa/. This increases user-friendliness because the web server frees users from package installations and provides users with validated workflows for input data and parameter settings, cloud computing resources for efficient job completion, a user-friendly interface, and professional support and maintenance.}, } @article {pmid37317615, year = {2023}, author = {Rathke, BH and Yu, H and Huang, H}, title = {What Remains Now That the Fear Has Passed? Developmental Trajectory Analysis of COVID-19 Pandemic for Co-occurrences of Twitter, Google Trends, and Public Health Data.}, journal = {Disaster medicine and public health preparedness}, volume = {17}, number = {}, pages = {e471}, doi = {10.1017/dmp.2023.101}, pmid = {37317615}, issn = {1938-744X}, mesh = {Humans ; United States/epidemiology ; *COVID-19/epidemiology/psychology ; Pandemics ; Public Health ; *Social Media ; Search Engine ; Fear ; }, abstract = {OBJECTIVE: The rapid onset of coronavirus disease 2019 (COVID-19) created a complex virtual collective consciousness.
Misinformation and polarization were hallmarks of the pandemic in the United States, highlighting the importance of studying public opinion online. Humans express their thoughts and feelings more openly than ever before on social media; the co-occurrence of multiple data sources has become valuable for monitoring and understanding public sentimental preparedness and response to an event within our society.

METHODS: In this study, Twitter and Google Trends data were used as co-occurrence data to understand the dynamics of sentiment and interest during the COVID-19 pandemic in the United States from January 2020 to September 2021. Developmental trajectory analysis of Twitter sentiment was conducted using corpus linguistic techniques and word cloud mapping to reveal 8 positive and negative sentiments and emotions. Machine learning algorithms were used to implement opinion mining of how Twitter sentiment related to Google Trends interest and historical COVID-19 public health data.
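A minimal sketch of tweet-level sentiment scoring of this kind, using NLTK's VADER as a stand-in for the Google NLP service the study used; the example tweets are invented:

```python
# Hedged sketch: lexicon-based sentiment scoring of tweets. VADER is used
# here as an illustrative substitute for the study's Google NLP pipeline.
import nltk
from nltk.sentiment.vader import SentimentIntensityAnalyzer

nltk.download("vader_lexicon", quiet=True)
sia = SentimentIntensityAnalyzer()

tweets = ["Stay safe everyone, we will get through this together!",
          "Another lockdown... I can't take this anymore."]
for t in tweets:
    print(sia.polarity_scores(t)["compound"], t)  # compound score in [-1, 1]
```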

RESULTS: The sentiment analysis went beyond polarity to detect specific feelings and emotions during the pandemic.

CONCLUSIONS: Findings on emotional behavior at each stage of the pandemic were presented by associating the detected emotions with historical COVID-19 data and Google Trends data.}, } @article {pmid37315445, year = {2023}, author = {Guzman, NA and Guzman, DE and Blanc, T}, title = {Advancements in portable instruments based on affinity-capture-migration and affinity-capture-separation for use in clinical testing and life science applications.}, journal = {Journal of chromatography. A}, volume = {1704}, number = {}, pages = {464109}, doi = {10.1016/j.chroma.2023.464109}, pmid = {37315445}, issn = {1873-3778}, mesh = {Humans ; *Pandemics ; *COVID-19/diagnosis ; Laboratories ; Smartphone ; Immunoassay/methods ; COVID-19 Testing ; }, abstract = {The shift from testing at centralized diagnostic laboratories to remote locations is being driven by the development of point-of-care (POC) instruments and represents a transformative moment in medicine. POC instruments address the need for rapid results that can inform faster therapeutic decisions and interventions. These instruments are especially valuable in the field, such as in an ambulance, or in remote and rural locations. The development of telehealth, enabled by advancements in digital technologies like smartphones and cloud computing, is also aiding in this evolution, allowing medical professionals to provide care remotely, potentially reducing healthcare costs and improving patient longevity. One notable POC device is the lateral flow immunoassay (LFIA), which played a major role in addressing the COVID-19 pandemic due to its ease of use, rapid analysis time, and low cost. However, LFIA tests exhibit relatively low analytical sensitivity and provide semi-quantitative information, indicating either a positive, negative, or inconclusive result, which can be attributed to their one-dimensional format. Immunoaffinity capillary electrophoresis (IACE), on the other hand, offers a two-dimensional format that includes an affinity-capture step of one or more matrix constituents followed by release and electrophoretic separation. The method provides greater analytical sensitivity and quantitative information, thereby reducing the rate of false positives, false negatives, and inconclusive results. Combining LFIA and IACE technologies can thus provide an effective and economical solution for screening, confirming results, and monitoring patient progress, representing a key strategy in advancing diagnostics in healthcare.}, } @article {pmid37313273, year = {2023}, author = {Alshammari, A and Fayez Alanazi, M}, title = {Use of Technology in Enhancing Learning Among Nurses in Saudi Arabia; a Systematic Review.}, journal = {Journal of multidisciplinary healthcare}, volume = {16}, number = {}, pages = {1587-1599}, pmid = {37313273}, issn = {1178-2390}, abstract = {The landscape of teaching and learning, particularly in the realm of technology-supported education, is being transformed by the ongoing presence of portable digital assistant devices and other technological tools. Such technologies have become an integral aspect of learning these days. The use of Virtual Reality, Augmented Reality, cloud computing, and social media through platforms such as Twitter, Dropbox, Google Apps, and YouTube has become the norm in modern education and has greatly enhanced the quality of higher nursing education. Therefore, this study aims to synthesize evidence on the effectiveness of technology in nursing education in Saudi Arabia.
The study used a systematic review methodology to extract relevant studies from databases and reference lists of related literature reviews. Two independent reviewers screened the titles, abstracts, and full texts based on predefined eligibility criteria. The review identified four themes from the data retrieved from 15 published articles: attitudes towards e-learning; challenges and quality related to e-learning; social media and smartphone usage; and virtual reality and simulation experience. Mixed attitudes were identified among the participants of the selected studies. Various challenges linked with e-learning, social media usage, smartphones, and simulation were identified, including technical issues, lack of awareness, and lack of training. The findings also indicate that awareness of e-learning should be increased for better outcomes in Saudi Arabia. The findings suggest that technology has the potential to improve learning outcomes for nurses, including those involved in research. Therefore, it is crucial to ensure that both educators and students receive adequate training on how to effectively use the upcoming technology in Saudi Arabia.}, } @article {pmid37312944, year = {2023}, author = {Farooq, MS and Riaz, S and Tehseen, R and Farooq, U and Saleem, K}, title = {Role of Internet of things in diabetes healthcare: Network infrastructure, taxonomy, challenges, and security model.}, journal = {Digital health}, volume = {9}, number = {}, pages = {20552076231179056}, pmid = {37312944}, issn = {2055-2076}, abstract = {The Internet of things (IoT) is an emerging technology that enables ubiquitous devices to connect with the Internet. IoT technology has revolutionized the medical and healthcare industry by interconnecting smart devices and sensors. IoT-based devices and biosensors are ideal to detect diabetes disease by collecting accurate glucose values continuously. Diabetes is one of the well-known and major chronic diseases that has a worldwide social impact on community life. Blood glucose monitoring is a challenging task, and there is a need to propose a proper architecture for a noninvasive glucose sensing and monitoring mechanism, which could make diabetic people aware of self-management techniques. This survey presents a rigorous discussion of diabetes types and presents detection techniques based on IoT technology. In this research, an IoT-based healthcare network infrastructure has been proposed for monitoring diabetes disease based on big data analytics, cloud computing, and machine learning. The proposed infrastructure could handle the symptoms of diabetes, collect data, analyze it, and then transmit the results to the server for the next action. The paper also presents an inclusive survey of IoT-based diabetes monitoring applications, services, and proposed solutions. Furthermore, a taxonomy of IoT-based diabetes disease management is presented.
Finally, an attack taxonomy is presented, challenges are discussed, and a lightweight security model is proposed to secure patients' health data.}, } @article {pmid37310789, year = {2023}, author = {Campi, D and Mounet, N and Gibertini, M and Pizzi, G and Marzari, N}, title = {Expansion of the Materials Cloud 2D Database.}, journal = {ACS nano}, volume = {17}, number = {12}, pages = {11268-11278}, pmid = {37310789}, issn = {1936-086X}, abstract = {Two-dimensional (2D) materials are among the most promising candidates for beyond-silicon electronic, optoelectronic, and quantum computing applications. Recently, their recognized importance sparked a push to discover and characterize novel 2D materials. Within a few years, the number of experimentally exfoliated or synthesized 2D materials went from a few to more than a hundred, with the number of theoretically predicted compounds reaching a few thousand. In 2018 we first contributed to this effort with the identification of 1825 compounds that are either easily (1036) or potentially (789) exfoliable from experimentally known 3D compounds. Here, we report on a major expansion of this 2D portfolio thanks to the extension of the screening protocol to an additional experimental database (MPDS) as well as the updated versions of the two databases (ICSD and COD) used in our previous work. This expansion leads to the discovery of an additional 1252 monolayers, bringing the total to 3077 compounds and, notably, almost doubling the number of easily exfoliable materials to 2004. We optimize the structural properties of all these monolayers and explore their electronic structure with a particular emphasis on those rare large-bandgap 2D materials that could be precious in isolating 2D field-effect-transistor channels. Finally, for each material containing up to 6 atoms per unit cell, we identify the best candidates to form commensurate heterostructures, balancing requirements on supercell size and minimal strain.}, } @article {pmid37304830, year = {2023}, author = {Song, Z and Ma, H and Sun, S and Xin, Y and Zhang, R}, title = {Rainbow: reliable personally identifiable information retrieval across multi-cloud.}, journal = {Cybersecurity}, volume = {6}, number = {1}, pages = {19}, pmid = {37304830}, issn = {2523-3246}, abstract = {Personally identifiable information (PII) refers to any information that links to an individual. Sharing PII is extremely useful in public affairs yet hard to implement due to worries about privacy violations. Building a PII retrieval service over multi-cloud, which is a modern strategy to make services stable where multiple servers are deployed, seems to be a promising solution. However, three major technical challenges remain to be solved. The first is the privacy and access control of PII. In fact, each entry in PII can be shared to different users with different access rights. Hence, flexible and fine-grained access control is needed. Second, a reliable user revocation mechanism is required to ensure that users can be revoked efficiently, even if a few cloud servers are compromised or collapse, to avoid data leakage. Third, verifying the correctness of received PII and locating a misbehaving server when wrong data are returned is crucial to guarantee users' privacy, but challenging to realize. In this paper, we propose Rainbow, a secure and practical PII retrieval scheme to solve the above issues.
In particular, we design an important cryptographic tool, called Reliable Outsourced Attribute Based Encryption (ROABE), which provides data privacy, flexible and fine-grained access control, reliable immediate user revocation, and verification for multiple servers simultaneously, to support Rainbow. Moreover, we present how to build Rainbow with ROABE and several necessary cloud techniques in the real world. To evaluate the performance, we deploy Rainbow on multiple mainstream clouds, namely AWS, GCP, and Microsoft Azure, and run experiments in browsers on mobile phones and computers. Both theoretical analysis and experimental results indicate that Rainbow is secure and practical.}, } @article {pmid37303980, year = {2023}, author = {Rodrigues, VF and da Rosa Righi, R and da Costa, CA and Zeiser, FA and Eskofier, B and Maier, A and Kim, D}, title = {Digital health in smart cities: Rethinking the remote health monitoring architecture on combining edge, fog, and cloud.}, journal = {Health and technology}, volume = {13}, number = {3}, pages = {449-472}, pmid = {37303980}, issn = {2190-7188}, abstract = {PURPOSE: Smart cities that support the execution of health services are more and more in evidence today. Here, it is mainstream to use IoT-based vital sign data to serve a multi-tier architecture. The state-of-the-art proposes the combination of edge, fog, and cloud computing to support critical health applications efficiently. However, to the best of our knowledge, initiatives typically present the architectures without the adaptation and execution optimizations needed to fully address health demands.

METHODS: This article introduces the VitalSense model, which provides a hierarchical multi-tier remote health monitoring architecture in smart cities by combining edge, fog, and cloud computing.

RESULTS: Although the model uses a traditional composition, our contributions lie in how each infrastructure level is handled. We explore adaptive data compression and homomorphic encryption at the edge, a multi-tier notification mechanism, low-latency health traceability with data sharding, a serverless execution engine to support multiple fog layers, and an offloading mechanism based on service and person computing priorities.
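As an illustration of the edge-side adaptation described above, the following Python sketch picks a zlib compression level from the device's current battery and uplink budget. The function name, thresholds, and resource signals are illustrative assumptions for exposition, not VitalSense's published implementation (which additionally applies homomorphic encryption at the edge).

    import json
    import zlib

    # Illustrative sketch only: thresholds and resource signals are assumed,
    # not taken from the VitalSense paper.
    def compress_vitals(sample: dict, battery_pct: float, link_kbps: float) -> bytes:
        """Serialize a vital-sign sample and choose a zlib level adaptively:
        spend less CPU when the battery is low, more when the uplink is slow."""
        payload = json.dumps(sample).encode("utf-8")
        if battery_pct < 20:
            level = 1   # battery-saving: cheapest compression
        elif link_kbps < 64:
            level = 9   # slow uplink: trade CPU for a smaller payload
        else:
            level = 6   # balanced default
        return zlib.compress(payload, level)

    packet = compress_vitals({"hr": 72, "spo2": 98}, battery_pct=85.0, link_kbps=32.0)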

CONCLUSIONS: This article details the rationale behind these topics, describing VitalSense use cases for disruptive healthcare services and preliminary insights regarding prototype evaluation.}, } @article {pmid37300076, year = {2023}, author = {Aqeel, I and Khormi, IM and Khan, SB and Shuaib, M and Almusharraf, A and Alam, S and Alkhaldi, NA}, title = {Load Balancing Using Artificial Intelligence for Cloud-Enabled Internet of Everything in Healthcare Domain.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {11}, pages = {}, pmid = {37300076}, issn = {1424-8220}, mesh = {Animals ; Horses ; *Artificial Intelligence ; *Algorithms ; Intelligence ; Awareness ; Internet ; }, abstract = {The emergence of the Internet of Things (IoT) and its subsequent evolution into the Internet of Everything (IoE) is a result of the rapid growth of information and communication technologies (ICT). However, implementing these technologies comes with certain obstacles, such as the limited availability of energy resources and processing power. Consequently, there is a need for energy-efficient and intelligent load-balancing models, particularly in healthcare, where real-time applications generate large volumes of data. This paper proposes a novel, energy-aware artificial intelligence (AI)-based load balancing model that employs the Chaotic Horse Ride Optimization Algorithm (CHROA) and big data analytics (BDA) for cloud-enabled IoT environments. The CHROA technique enhances the optimization capacity of the Horse Ride Optimization Algorithm (HROA) using chaotic principles. The proposed CHROA model balances the load, optimizes available energy resources using AI techniques, and is evaluated using various metrics. Experimental results show that the CHROA model outperforms existing models. For instance, while the Artificial Bee Colony (ABC), Gravitational Search Algorithm (GSA), and Whale Defense Algorithm with Firefly Algorithm (WD-FA) techniques attain average throughputs of 58.247 Kbps, 59.957 Kbps, and 60.819 Kbps, respectively, the CHROA model achieves an average throughput of 70.122 Kbps. The proposed CHROA-based model presents an innovative approach to intelligent load balancing and energy optimization in cloud-enabled IoT environments. The results highlight its potential to address critical challenges and contribute to developing efficient and sustainable IoT/IoE solutions.}, } @article {pmid37299993, year = {2023}, author = {Iqbal, F and Altaf, A and Waris, Z and Aray, DG and Flores, MAL and Díez, IT and Ashraf, I}, title = {Blockchain-Modeled Edge-Computing-Based Smart Home Monitoring System with Energy Usage Prediction.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {11}, pages = {}, pmid = {37299993}, issn = {1424-8220}, support = {N/A//European University of Atlantics/ ; }, abstract = {Internet of Things (IoT) has made significant strides in energy management systems recently. Due to the continually increasing cost of energy, supply-demand disparities, and rising carbon footprints, the need for smart homes for monitoring, managing, and conserving energy has increased. In IoT-based systems, device data are delivered to the network edge before being stored in the fog or cloud for further transactions. This raises worries about the data's security, privacy, and veracity. It is vital to monitor who accesses and updates this information to protect IoT end-users linked to IoT devices. Smart meters are installed in smart homes and are susceptible to numerous cyber attacks. 
Access to IoT devices and related data must be secured to prevent misuse and protect IoT users' privacy. The purpose of this research was to design a blockchain-based edge computing method for securing the smart home system, in conjunction with machine learning techniques, in order to construct a secure smart home system with energy usage prediction and user profiling. The research proposes a blockchain-based smart home system that can continuously monitor IoT-enabled smart home appliances such as smart microwaves, dishwashers, furnaces, and refrigerators, among others. An approach based on machine learning was utilized to train the auto-regressive integrated moving average (ARIMA) model for energy usage prediction, which is provided in the user's wallet, to estimate energy consumption and maintain user profiles. The model was tested using the moving average statistical model, the ARIMA model, and the deep-learning-based long short-term memory (LSTM) model on a dataset of smart-home-based energy usage under changing weather conditions. The findings of the analysis reveal that the LSTM model accurately forecasts the energy usage of smart homes.}, } @article {pmid37299938, year = {2023}, author = {Yavari, A and Korala, H and Georgakopoulos, D and Kua, J and Bagha, H}, title = {Sazgar IoT: A Device-Centric IoT Framework and Approximation Technique for Efficient and Scalable IoT Data Processing.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {11}, pages = {}, pmid = {37299938}, issn = {1424-8220}, abstract = {The Internet of Things (IoT) plays a fundamental role in monitoring applications; however, existing approaches relying on cloud and edge-based IoT data analysis encounter issues such as network delays and high costs, which can adversely impact time-sensitive applications. To address these challenges, this paper proposes an IoT framework called Sazgar IoT. Unlike existing solutions, Sazgar IoT leverages only IoT devices and IoT data analysis approximation techniques to meet the time-bounds of time-sensitive IoT applications. In this framework, the computing resources onboard the IoT devices are utilised to process the data analysis tasks of each time-sensitive IoT application. This eliminates the network delays associated with transferring large volumes of high-velocity IoT data to cloud or edge computers. To ensure that each task meets its application-specific time-bound and accuracy requirements, we employ approximation techniques for the data analysis tasks of time-sensitive IoT applications. These techniques take into account the available computing resources and optimise the processing accordingly. To evaluate the effectiveness of Sazgar IoT, experimental validation has been conducted. The results demonstrate that the framework successfully meets the time-bound and accuracy requirements of the COVID-19 citizen compliance monitoring application by effectively utilising the available IoT devices. 
The experimental validation further confirms that Sazgar IoT is an efficient and scalable solution for IoT data processing, addressing existing network delay issues for time-sensitive applications and significantly reducing the costs related to the procurement, deployment, and maintenance of cloud and edge computing devices.}, } @article {pmid37299817, year = {2023}, author = {Zhang, X and Cheng, Z and Du, L and Du, Y}, title = {Progressive Classifier Mechanism for Bridge Expansion Joint Health Status Monitoring System Based on Acoustic Sensors.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {11}, pages = {}, pmid = {37299817}, issn = {1424-8220}, mesh = {*Acoustics ; *Algorithms ; Cloud Computing ; Computer Simulation ; Health Status ; }, abstract = {The application of IoT (Internet of Things) technology to the health monitoring of expansion joints is of great importance in enhancing the efficiency of bridge expansion joint maintenance. In this study, a low-power, high-efficiency, end-to-cloud coordinated monitoring system analyzes acoustic signals to identify faults in bridge expansion joints. To address the issue of scarce authentic data related to bridge expansion joint failures, an expansion joint damage simulation data collection platform is established for well-annotated datasets. Based on this, a progressive two-level classifier mechanism is proposed, combining template matching based on AMPD (Automatic Peak Detection) and deep learning algorithms based on VMD (Variational Mode Decomposition) denoising, utilizing edge and cloud computing power efficiently. The simulation-based datasets were used to test the two-level algorithm, with the first-level edge-end template matching algorithm achieving fault detection rates of 93.3% and the second-level cloud-based deep learning algorithm achieving classification accuracy of 98.4%. The proposed system in this paper has demonstrated efficient performance in monitoring the health of expansion joints, according to the aforementioned results.}, } @article {pmid37299800, year = {2023}, author = {Hou, KM and Diao, X and Shi, H and Ding, H and Zhou, H and de Vaulx, C}, title = {Trends and Challenges in AIoT/IIoT/IoT Implementation.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {11}, pages = {}, pmid = {37299800}, issn = {1424-8220}, abstract = {For the coming years, metaverse, digital twin and autonomous vehicle applications are the leading technologies for many complex applications hitherto inaccessible such as health and life sciences, smart home, smart agriculture, smart city, smart car and logistics, Industry 4.0, entertainment (video game) and social media applications, due to recent tremendous developments in process modeling, supercomputing, cloud data analytics (deep learning, etc.), communication network and AIoT/IIoT/IoT technologies. AIoT/IIoT/IoT is a crucial research field because it provides the essential data to fuel metaverse, digital twin, real-time Industry 4.0 and autonomous vehicle applications. However, the science of AIoT is inherently multidisciplinary, and therefore, it is difficult for readers to understand its evolution and impacts. Our main contribution in this article is to analyze and highlight the trends and challenges of the AIoT technology ecosystem including core hardware (MCU, MEMS/NEMS sensors and wireless access medium), core software (operating system and protocol communication stack) and middleware (deep learning on a microcontroller: TinyML).
Two low-power AI technologies are emerging, TinyML and neuromorphic computing; we present a single AIoT/IIoT/IoT device implementation using TinyML, dedicated to strawberry disease detection, as a case study. So far, despite the very rapid progress of AIoT/IIoT/IoT technologies, several challenges remain to be overcome such as safety, security, latency, interoperability and reliability of sensor data, which are essential characteristics to meet the requirements of metaverse, digital twin, autonomous vehicle and Industry 4.0 applications.}, } @article {pmid37299731, year = {2023}, author = {AlQahtani, SA}, title = {An Evaluation of e-Health Service Performance through the Integration of 5G IoT, Fog, and Cloud Computing.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {11}, pages = {}, pmid = {37299731}, issn = {1424-8220}, mesh = {*Telemedicine/instrumentation/methods ; Cloud Computing ; Internet of Things ; Neural Networks, Computer ; Computer Simulation ; }, abstract = {In recent years, Internet of Things (IoT) advancements have led to the development of vastly improved remote healthcare services. Scalability, high bandwidth, low latency, and low power consumption are all essential features of the applications that make these services possible. An upcoming healthcare system and wireless sensor network that can fulfil these needs is based on fifth-generation network slicing. For better resource management, organizations can implement network slicing, which partitions the physical network into distinct logical slices according to quality of service (QoS) needs. Based on the findings of this research, an IoT-fog-cloud architecture is proposed for use in e-Health services. The framework is made up of three different but interconnected systems: a cloud radio access network, a fog computing system, and a cloud computing system. A queuing network serves as a model for the proposed system. The model's constituent parts are then subjected to analysis. To assess the system's performance, we run a numerical example simulation using Java modelling tools and then analyze the results to identify the key performance parameters. The analytical formulas that were derived ensure the precision of the results. Finally, the results show that the proposed model improves eHealth services' quality of service in an efficient way by selecting the right slice compared to traditional systems.}, } @article {pmid37274449, year = {2023}, author = {Verma, P and Gupta, A and Kumar, M and Gill, SS}, title = {FCMCPS-COVID: AI propelled fog-cloud inspired scalable medical cyber-physical system, specific to coronavirus disease.}, journal = {Internet of things (Amsterdam, Netherlands)}, volume = {23}, number = {}, pages = {100828}, pmid = {37274449}, issn = {2542-6605}, abstract = {Medical cyber-physical systems (MCPS) firmly integrate a network of medical objects. These systems are highly efficacious and have been progressively used in Healthcare 4.0 to achieve continuous high-quality services. Healthcare 4.0 encompasses numerous emerging technologies, whose applications have been realized in the monitoring of a variety of virus outbreaks. As a growing healthcare trend, coronavirus disease (COVID-19) can be cured and its spread can be prevented using MCPS. This virus spreads from human to human and can have devastating consequences.
Moreover, with the alarmingly rising death rate and new cases across the world, there is an urgent need for continuous identification and screening of infected patients to mitigate the spread of the disease. Motivated by these facts, we propose a framework for early detection, prevention, and control of the COVID-19 outbreak by using novel Industry 5.0 technologies. The proposed framework uses a dimensionality reduction technique in the fog layer, allowing high-quality data to be used for classification purposes. The fog layer also uses the ensemble learning-based data classification technique for the detection of COVID-19 patients based on the symptomatic dataset. In addition, in the cloud layer, social network analysis (SNA) has been performed to control the spread of COVID-19. The experimental results reveal that compared with state-of-the-art methods, the proposed framework achieves better results in terms of accuracy (82.28 %), specificity (91.42 %), sensitivity (90 %) and stability with effective response time. Furthermore, the utilization of CVI-based alert generation at the fog layer improves the novelty aspects of the proposed system.}, } @article {pmid37274420, year = {2023}, author = {Rezazadeh, B and Asghari, P and Rahmani, AM}, title = {Computer-aided methods for combating Covid-19 in prevention, detection, and service provision approaches.}, journal = {Neural computing & applications}, volume = {35}, number = {20}, pages = {14739-14778}, pmid = {37274420}, issn = {0941-0643}, abstract = {The infectious disease Covid-19 has been causing severe social, economic, and human suffering across the globe since 2019. Countries have utilized different strategies in the last few years to combat Covid-19 based on their capabilities, technological infrastructure, and investments. A massive epidemic like this cannot be controlled without an intelligent and automatic health care system. The first reaction to the disease outbreak was lockdown, and researchers focused more on developing methods to diagnose the disease and recognize its behavior. However, as the new lifestyle becomes more normalized, research has shifted to utilizing computer-aided methods to monitor, track, detect, and treat individuals and provide services to citizens. Thus, the Internet of Things based on fog-cloud computing, combined with artificial intelligence approaches such as machine learning and deep learning, is a practical concept. This article aims to survey computer-based approaches to combat Covid-19 based on prevention, detection, and service provision.
Technically and statistically, this article analyzes current methods, categorizes them, presents a technical taxonomy, and explores future and open issues.}, } @article {pmid37266260, year = {2023}, author = {Healthcare Engineering, JO}, title = {Retracted: Medical Equipment Comprehensive Management System Based on Cloud Computing and Internet of Things.}, journal = {Journal of healthcare engineering}, volume = {2023}, number = {}, pages = {9805036}, pmid = {37266260}, issn = {2040-2309}, abstract = {[This retracts the article DOI: 10.1155/2021/6685456.].}, } @article {pmid37266231, year = {2023}, author = {Healthcare Engineering, JO}, title = {Retracted: Digital Forensic Investigation of Healthcare Data in Cloud Computing Environment.}, journal = {Journal of healthcare engineering}, volume = {2023}, number = {}, pages = {9782643}, pmid = {37266231}, issn = {2040-2309}, abstract = {[This retracts the article DOI: 10.1155/2022/9709101.].}, } @article {pmid37259021, year = {2023}, author = {O'Connell, KA and Yosufzai, ZB and Campbell, RA and Lobb, CJ and Engelken, HT and Gorrell, LM and Carlson, TB and Catana, JJ and Mikdadi, D and Bonazzi, VR and Klenk, JA}, title = {Accelerating genomic workflows using NVIDIA Parabricks.}, journal = {BMC bioinformatics}, volume = {24}, number = {1}, pages = {221}, pmid = {37259021}, issn = {1471-2105}, mesh = {Workflow ; *Computer Graphics ; *Software ; Genomics ; }, abstract = {BACKGROUND: As genome sequencing becomes better integrated into scientific research, government policy, and personalized medicine, the primary challenge for researchers is shifting from generating raw data to analyzing these vast datasets. Although much work has been done to reduce compute times using various configurations of traditional CPU computing infrastructures, Graphics Processing Units (GPUs) offer opportunities to accelerate genomic workflows by orders of magnitude. Here we benchmark one GPU-accelerated software suite called NVIDIA Parabricks on Amazon Web Services (AWS), Google Cloud Platform (GCP), and an NVIDIA DGX cluster. We benchmarked six variant calling pipelines, including two germline callers (HaplotypeCaller and DeepVariant) and four somatic callers (Mutect2, Muse, LoFreq, SomaticSniper).
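To make the benchmarked workflow concrete, the sketch below launches the GPU-accelerated HaplotypeCaller through Parabricks' pbrun front end from Python. The subcommand and flag names follow NVIDIA's public Parabricks documentation but should be checked against the installed version; the file paths are placeholders, and this is not the paper's benchmark harness.

    import subprocess

    # Hedged sketch: flags assume the documented pbrun interface; paths are
    # placeholders for a reference FASTA, an aligned BAM, and the VCF output.
    cmd = [
        "pbrun", "haplotypecaller",
        "--ref", "GRCh38.fa",
        "--in-bam", "sample.bam",
        "--out-variants", "sample.vcf",
    ]
    subprocess.run(cmd, check=True)  # raises CalledProcessError on failure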

RESULTS: We achieved up to 65 × acceleration with germline variant callers, bringing HaplotypeCaller runtimes down from 36 h to 33 min on AWS, 35 min on GCP, and 24 min on the NVIDIA DGX. Somatic callers exhibited more variation between the number of GPUs and computing platforms. On cloud platforms, GPU-accelerated germline callers resulted in cost savings compared with CPU runs, whereas some somatic callers were more expensive than CPU runs because their GPU acceleration was not sufficient to overcome the increased GPU cost.
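As a quick sanity check on the headline figure, the reported AWS runtimes correspond to roughly a 65-fold speedup (the other platforms may start from their own CPU baselines, so the same arithmetic is not applied to them here):

    # 36 h CPU HaplotypeCaller runtime versus 33 min on GPU (AWS), figures
    # quoted in the abstract above.
    cpu_minutes = 36 * 60          # 2160 minutes
    gpu_minutes = 33
    print(f"{cpu_minutes / gpu_minutes:.0f}x")  # -> 65x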

CONCLUSIONS: Germline variant callers scaled well with the number of GPUs across platforms, whereas somatic variant callers exhibited more variation in the number of GPUs with the fastest runtimes, suggesting that, at least with the version of Parabricks used here, these workflows are less GPU optimized and require benchmarking on the platform of choice before being deployed at production scales. Our study demonstrates that GPUs can be used to greatly accelerate genomic workflows, thus bringing urgent societal advances in the areas of biosurveillance and personalized medicine closer within reach.}, } @article {pmid37258867, year = {2023}, author = {Callaghan, M}, title = {Cloud Computing for Metagenomics: Building a Personalized Computational Platform for Pipeline Analyses.}, journal = {Methods in molecular biology (Clifton, N.J.)}, volume = {2649}, number = {}, pages = {261-279}, pmid = {37258867}, issn = {1940-6029}, mesh = {*Software ; *Metagenomics ; Cloud Computing ; Computers ; Web Browser ; Computational Biology/methods ; }, abstract = {Cloud Computing services such as Microsoft Azure, Amazon Web Services, and Google Cloud provide a range of tools and services that enable scientists to rapidly prototype, build, and deploy platforms for their computational experiments. This chapter describes a protocol to deploy and configure an Ubuntu Linux Virtual Machine in the Microsoft Azure cloud, which includes Miniconda Python, a Jupyter Lab server, and the QIIME toolkit configured for access through a web browser to facilitate a typical metagenomics analysis pipeline.}, } @article {pmid37252914, year = {2023}, author = {Arefian, Z and Khayyambashi, MR and Movahhedinia, N}, title = {Delay reduction in MTC using SDN based offloading in Fog computing.}, journal = {PloS one}, volume = {18}, number = {5}, pages = {e0286483}, pmid = {37252914}, issn = {1932-6203}, mesh = {*Software ; *Algorithms ; Cloud Computing ; Communication ; Weather ; }, abstract = {Fog computing (FC) brings the Cloud close to users, improving quality of service and reducing service delays. In this article, the convergence of FC and Software-Defined Networking (SDN) has been proposed to implement complicated mechanisms of resource management. SDN has become the practical standard for FC systems. Priority and differential flow space allocation have been applied to arrange this framework for heterogeneous requests in Machine-Type Communications. The delay-sensitive flows are assigned to a configuration of priority queues on each Fog. Due to limited resources in the Fog, a promising solution is offloading flows to other Fogs through a decision-based SDN controller. The flow-based Fog nodes have been modeled according to queueing theory, where polling priority algorithms have been applied to service the flows and to reduce the starvation problem in a multi-queueing model. It is observed that the percentage of delay-sensitive processed flows, the network consumption, and the average service time in the proposed mechanism are improved by about 80%, 65%, and 60%, respectively, compared to traditional Cloud computing.
Therefore, delay reduction based on flow types and task offloading is proposed.}, } @article {pmid37252813, year = {2023}, author = {Samarakoon, H and Ferguson, JM and Gamaarachchi, H and Deveson, IW}, title = {Accelerated nanopore basecalling with SLOW5 data format.}, journal = {Bioinformatics (Oxford, England)}, volume = {39}, number = {6}, pages = {}, pmid = {37252813}, issn = {1367-4811}, mesh = {*Software ; Sequence Analysis, DNA/methods ; *Nanopores ; Genome ; Genomics ; High-Throughput Nucleotide Sequencing ; }, abstract = {MOTIVATION: Nanopore sequencing is emerging as a key pillar in the genomic technology landscape but computational constraints limiting its scalability remain to be overcome. The translation of raw current signal data into DNA or RNA sequence reads, known as 'basecalling', is a major point of friction in any nanopore sequencing workflow. Here, we exploit the advantages of the recently developed signal data format 'SLOW5' to streamline and accelerate nanopore basecalling on high-performance computing (HPC) and cloud environments.

RESULTS: SLOW5 permits highly efficient sequential data access, eliminating a potential analysis bottleneck. To take advantage of this, we introduce Buttery-eel, an open-source wrapper for Oxford Nanopore's Guppy basecaller that enables SLOW5 data access, resulting in performance improvements that are essential for scalable, affordable basecalling.
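The sequential access pattern credited for these gains can be sketched in a few lines of Python using the pyslow5 bindings. This assumes pyslow5's documented Open/seq_reads interface, and 'example.blow5' is a placeholder path; consult the SLOW5 documentation for the authoritative API.

    import pyslow5  # Python bindings for the SLOW5/BLOW5 signal format

    # Hedged sketch: seq_reads() yields one record per read, in file order,
    # which is what makes the access pattern sequential rather than random.
    s5 = pyslow5.Open("example.blow5", "r")
    n_reads = 0
    for read in s5.seq_reads():
        signal = read["signal"]  # raw current samples for this read
        n_reads += 1
    print(f"streamed {n_reads} reads")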

Buttery-eel is available at https://github.com/Psy-Fer/buttery-eel.}, } @article {pmid37252270, year = {2023}, author = {Torres-Gaona, G and Aledo-Serrano, Á and García-Morales, I and Toledano, R and Valls, J and Cosculluela, B and Munsó, L and Raurich, X and Trejo, A and Blanquez, D and Gil-Nagel, A}, title = {Artificial intelligence system, based on mjn-SERAS algorithm, for the early detection of seizures in patients with refractory focal epilepsy: A cross-sectional pilot study.}, journal = {Epilepsy & behavior reports}, volume = {22}, number = {}, pages = {100600}, pmid = {37252270}, issn = {2589-9864}, abstract = {Around one-third of epilepsy patients develop drug-resistant seizures; early detection of seizures could help improve safety, reduce patient anxiety, increase independence, and enable acute treatment. In recent years, the use of artificial intelligence techniques and machine learning algorithms in different diseases, including epilepsy, has increased significantly. The main objective of this study is to determine whether the mjn-SERAS artificial intelligence algorithm developed by MJN Neuroserveis, can detect seizures early using patient-specific data to create a personalized mathematical model based on EEG training, defined as the programmed recognition of oncoming seizures before they are primarily initiated, usually within a period of a few minutes, in patients diagnosed of epilepsy. Retrospective, cross-sectional, observational, multicenter study to determine the sensitivity and specificity of the artificial intelligence algorithm. We searched the database of the Epilepsy Units of three Spanish medical centers and selected 50 patients evaluated between January 2017 and February 2021, diagnosed with refractory focal epilepsy who underwent video-EEG monitoring recordings between 3 and 5 days, a minimum of 3 seizures per patient, lasting more than 5 s and the interval between each seizure was greater than 1 h. Exclusion criteria included age <18 years, intracranial EEG monitoring, and severe psychiatric, neurological, or systemic disorders. The algorithm identified pre-ictal and interictal patterns from EEG data using our learning algorithm and was compared to a senior epileptologist's evaluation as a gold standard. Individual mathematical models of each patient were trained using this feature dataset. A total of 1963 h of 49 video-EEG recordings were reviewed, with an average of 39.26 h per patient. The video-EEG monitoring recorded 309 seizures as subsequently analyzed by the epileptologists. The mjn-SERAS algorithm was trained on 119 seizures and split testing was performed on 188 seizures. The statistical analysis includes the data from each model and reports 10 false negatives (no detection of episodes recorded by video-EEG) and 22 false positives (alert detected without clinical correlation or abnormal EEG signal within 30 min). Specifically, the automated mjn-SERAS AI algorithm achieved a sensitivity of 94.7% (95 %; CI 94.67-94.73), and an F-Score representing specificity of 92.2% (95 %; CI 92.17-92.23) compared to the reference performance represented by a mean (harmonic mean or average) and a positive predictive value of 91%, with a false positive rate of 0.55 per 24 h in the patient-independent model. This patient-specific AI algorithm for early seizure detection shows promising results in terms of sensitivity and false positive rate. 
Although the algorithm has high computational requirements, relying on specialized cloud servers for training and computing, its real-time computational load is low, allowing implementation on embedded devices for online seizure detection.}, } @article {pmid37251849, year = {2023}, author = {Al-Sharafi, MA and Iranmanesh, M and Al-Emran, M and Alzahrani, AI and Herzallah, F and Jamil, N}, title = {Determinants of cloud computing integration and its impact on sustainable performance in SMEs: An empirical investigation using the SEM-ANN approach.}, journal = {Heliyon}, volume = {9}, number = {5}, pages = {e16299}, pmid = {37251849}, issn = {2405-8440}, abstract = {Although extant literature has thoroughly investigated the incorporation of cloud computing services, examining their influence on sustainable performance, particularly at the organizational level, is insufficient. Consequently, the present research aims to assess the factors that impact the integration of cloud computing within small and medium-sized enterprises (SMEs) and its subsequent effects on environmental, financial, and social performance. The data were collected from 415 SMEs and were analyzed using a hybrid SEM-ANN approach. PLS-SEM results indicate that relative advantage, complexity, compatibility, top management support, cost reduction, and government support significantly affect cloud computing integration. This study also empirically demonstrated that SMEs could improve their financial, environmental, and social performance by integrating cloud computing services. ANN results show that complexity, with a normalized importance (NI) of 89.14%, is ranked first among other factors affecting cloud computing integration in SMEs. This is followed by cost reduction (NI = 82.67%), government support (NI = 73.37%), compatibility (NI = 70.02%), top management support (NI = 52.43%), and relative advantage (NI = 48.72%). Theoretically, this study goes beyond examining the determinants affecting cloud computing integration by examining their impact on SMEs' environmental, financial, and social performance in a comprehensive manner. The study also provides several practical implications for policymakers, SME managers, and cloud computing service providers.}, } @article {pmid37250444, year = {2023}, author = {Luscombe, DJ and Gatis, N and Anderson, K and Carless, D and Brazier, RE}, title = {Rapid, repeatable landscape-scale mapping of tree, hedgerow, and woodland habitats (THaW), using airborne LiDAR and spaceborne SAR data.}, journal = {Ecology and evolution}, volume = {13}, number = {5}, pages = {e10103}, pmid = {37250444}, issn = {2045-7758}, abstract = {In the UK, tree, hedgerow, and woodland (THaW) habitats are key havens for biodiversity and support many related ecosystem services. The UK is entering a period of agricultural policy realignment with respect to natural capital and climate change, meaning that now is a critical time to evaluate the distribution, resilience, and dynamics of THaW habitats. The fine-grained nature of habitats like hedgerows necessitates mapping of these features at relatively fine spatial resolution, and freely available public archives of airborne laser scanning (LiDAR) data at <2 m spatial resolution offer a means of doing so within UK settings. The high cost of LiDAR prohibits use for regular monitoring of THaW change, but space-borne sensors such as Sentinel-1 Synthetic Aperture Radar (SAR at ca. 10 m resolution) can potentially meet this need once baseline distributions are established.
We address two aims in this manuscript: (1) to rapidly quantify THaW across UK landscapes using LiDAR data and (2) to monitor canopy change intra- and inter-annually using SAR data. We show that workflows applied to airborne LiDAR data can deliver THaW baselines at 2 m resolution, with positional accuracy of >90%. It was also possible to combine LiDAR mapping data and Sentinel-1 SAR data to rapidly track canopy change through time (i.e., every 3 months) using cloud-based processing via Google Earth Engine. The resultant toolkit is also provided as an open-access web app. The results highlight that whilst nearly 90% of the tallest trees (above 15 m) are captured within the National Forest Inventory (NFI) database, only 50% of THaW with a canopy height range of 3-15 m are recorded. Current estimates of tree distribution neglect these finer-grained features (i.e., smaller or less contiguous THaW canopies), which we argue will account for a significant proportion of landscape THaW cover.}, } @article {pmid37238577, year = {2023}, author = {Tang, Y and Jin, M and Meng, H and Yang, L and Zheng, C}, title = {Attribute-Based Verifiable Conditional Proxy Re-Encryption Scheme.}, journal = {Entropy (Basel, Switzerland)}, volume = {25}, number = {5}, pages = {}, pmid = {37238577}, issn = {1099-4300}, support = {ICNS202006//the Shaanxi Key Laboratory of Information Communication Network and Security/ ; LNCT2022-A11//Henan Key Laboratory of Network Cryptography Technology/ ; }, abstract = {There are mostly semi-honest agents in cloud computing, so agents may perform unreliable calculations during the actual execution process. In this paper, an attribute-based verifiable conditional proxy re-encryption (AB-VCPRE) scheme using a homomorphic signature is proposed to solve the problem that the current attribute-based conditional proxy re-encryption (AB-CPRE) algorithm cannot detect the illegal behavior of the agent. The scheme implements robustness; that is, the re-encryption ciphertext can be verified by the verification server, confirming that the received ciphertext was correctly converted by the agent from the original ciphertext, so illegal activities of agents can be effectively detected. In addition, the article demonstrates the reliability of the constructed AB-VCPRE scheme's validation in the standard model, and proves that the scheme satisfies CPA security in the selective security model based on the learning with errors (LWE) assumption.}, } @article {pmid37238475, year = {2023}, author = {Zhao, M and Wang, H and Yao, B}, title = {Graphic Groups, Graph Homomorphisms, and Graphic Group Lattices in Asymmetric Topology Cryptography.}, journal = {Entropy (Basel, Switzerland)}, volume = {25}, number = {5}, pages = {}, pmid = {37238475}, issn = {1099-4300}, support = {22JR5RA876//The Science and Technology Program of Gansu Province/ ; 61163054//National Natural Science Foundation of China/ ; 61363060//National Natural Science Foundation of China/ ; 61662066//National Natural Science Foundation of China/ ; }, abstract = {Using asymmetric topology cryptography to encrypt networks on the basis of topology coding is a new topic of cryptography, which consists of two major elements, i.e., topological structures and mathematical constraints. The topological signature of asymmetric topology cryptography is stored in the computer by matrices that can produce number-based strings for application.
By means of algebra, we introduce every-zero mixed graphic groups, graphic lattices, and various graph-type homomorphisms and graphic lattices based on mixed graphic groups into cloud computing technology. Encryption of the whole network will be realized by various graphic groups.}, } @article {pmid37236950, year = {2023}, author = {Zhao, N and Zhao, YH and Zou, HF and Bai, XH and Zhen, Z}, title = {Spatial and temporal trends and drivers of fractional vegetation cover in Heilongjiang Province, China during 1990-2020.}, journal = {Ying yong sheng tai xue bao = The journal of applied ecology}, volume = {34}, number = {5}, pages = {1320-1330}, doi = {10.13287/j.1001-9332.202305.021}, pmid = {37236950}, issn = {1001-9332}, mesh = {Humans ; *Ecosystem ; Seasons ; China ; *Models, Theoretical ; Human Activities ; }, abstract = {Fractional vegetation cover (FVC) is a quantitative indicator for vegetation growth conditions and ecosystem change. Clarifying the spatial and temporal trends and driving factors of FVC is an important research topic for the global and regional ecological environment. Based on the Google Earth Engine (GEE) cloud computing platform, we estimated FVC in Heilongjiang Province from 1990 to 2020 using the pixel dichotomous model. We analyzed the temporal and spatial trends and drivers of FVC using the Mann-Kendall mutation test, Sen's slope analysis with the Mann-Kendall significance test, correlation analysis, and a structural equation model. The results showed that the estimated FVC based on the pixel dichotomous model had high accuracy (R[2]>0.7, root mean square error <0.1, relative root mean square error <14%). From 1990 to 2020, the annual average FVC in Heilongjiang was 0.79, with a fluctuating upward trend (0.72-0.85) and an average annual growth rate of 0.4%. The annual average FVC at the municipal administrative district level also showed varying degrees of increase. Areas with extremely high FVC dominated Heilongjiang Province, with their proportion gradually increasing. The area with an increasing FVC trend accounted for 67.4% of the total area, whereas the area with a decreasing trend accounted for only 26.2%, and the rest remained unchanged. The correlation of the human activity factor with annual average FVC was higher than that of the growing-season monthly average meteorological factor. The human activity factor was the main driver of FVC change in Heilongjiang Province, followed by land use type. The total effect of the monthly average meteorological factor during the growing season on FVC change was negative. The results would serve as technical support for long-term FVC monitoring and driving force analysis in Heilongjiang Province, and provide a reference for ecological environment restoration and protection, as well as the formulation of related land use policy.}, } @article {pmid37233602, year = {2023}, author = {Schranghamer, TF and Pannone, A and Ravichandran, H and Stepanoff, SP and Trainor, N and Redwing, JM and Wolfe, DE and Das, S}, title = {Radiation Resilient Two-Dimensional Electronics.}, journal = {ACS applied materials & interfaces}, volume = {15}, number = {22}, pages = {26946-26959}, doi = {10.1021/acsami.3c02406}, pmid = {37233602}, issn = {1944-8252}, abstract = {Limitations in cloud-based computing have prompted a paradigm shift toward all-in-one "edge" devices capable of independent data sensing, computing, and storage.
Advanced defense and space applications stand to benefit immensely from this due to their need for continual operation in areas where maintaining remote oversight is difficult. However, the extreme environments relevant to these applications necessitate rigorous testing of technologies, with a common requirement being hardness to ionizing radiation. Two-dimensional (2D) molybdenum disulfide (MoS2) has been noted to enable the sensing, storage, and logic capabilities necessary for all-in-one edge devices. Despite this, the investigation of ionizing radiation effects in MoS2-based devices remains incomplete. In particular, studies on gamma radiation effects in MoS2 have been largely limited to standalone films, with few device investigations; to the best of our knowledge, no explorations have been made into gamma radiation effects on the sensing and memory capabilities of MoS2-based devices. In this work, we have used a statistical approach to study high-dose (1 Mrad) gamma radiation effects on photosensitive and programmable memtransistors fabricated from large-area monolayer MoS2. Memtransistors were divided into separate groups to ensure accurate extraction of device characteristics pertaining to baseline performance, sensing, and memory before and after irradiation. All-MoS2 logic gates were also assessed to determine the gamma irradiation impact on logic implementation. Our findings show that the multiple functionalities of MoS2 memtransistors are not severely impacted by gamma irradiation even without dedicated shielding/mitigation techniques. We believe that these results serve as a foundation for more application-oriented studies going forward.}, } @article {pmid37220560, year = {2023}, author = {Behbehani, D and Komninos, N and Al-Begain, K and Rajarajan, M}, title = {Cloud Enterprise Dynamic Risk Assessment (CEDRA): a dynamic risk assessment using dynamic Bayesian networks for cloud environment.}, journal = {Journal of cloud computing (Heidelberg, Germany)}, volume = {12}, number = {1}, pages = {79}, pmid = {37220560}, issn = {2192-113X}, abstract = {Cloud computing adoption has been increasing rapidly amid COVID-19 as organisations accelerate the implementation of their digital strategies. Most models adopt traditional dynamic risk assessment, which does not adequately quantify or monetise risks to enable business-appropriate decision-making. In view of this challenge, a new model is proposed in this paper for assigning monetary loss terms to the consequence nodes, thereby enabling experts to better understand the financial risks of any consequence. The proposed Cloud Enterprise Dynamic Risk Assessment (CEDRA) model uses CVSS, threat intelligence feeds, and information about exploit availability in the wild, together with dynamic Bayesian networks, to predict vulnerability exploitations and financial losses. A case study of a scenario based on the Capital One breach attack was conducted to demonstrate experimentally the applicability of the model proposed in this paper.
The methods presented in this study have improved vulnerability and financial loss prediction.}, } @article {pmid37198391, year = {2023}, author = {Halder, B and Ahmadianfar, I and Heddam, S and Mussa, ZH and Goliatt, L and Tan, ML and Sa'adi, Z and Al-Khafaji, Z and Al-Ansari, N and Jawad, AH and Yaseen, ZM}, title = {Machine learning-based country-level annual air pollutants exploration using Sentinel-5P and Google Earth Engine.}, journal = {Scientific reports}, volume = {13}, number = {1}, pages = {7968}, pmid = {37198391}, issn = {2045-2322}, abstract = {Climatic conditions are triggering human health emergencies and changes to the earth's surface. Anthropogenic activities, such as built-up expansion, transportation development, industrial works, and some extreme events, are the main reasons for climate change and global warming. Air pollutants have increased gradually due to anthropogenic activities, threatening the earth's health. Nitrogen Dioxide (NO2), Carbon Monoxide (CO), and Aerosol Optical Depth (AOD) are particularly important for air quality measurement because these pollutants are among the most harmful to the environment and human health. The Earth-observation satellite Sentinel-5P was applied to monitor air pollutants and chemical conditions in the atmosphere from 2018 to 2021. The cloud-computing-based Google Earth Engine (GEE) platform was applied to monitor these air pollutants and chemical components in the atmosphere. NO2 variation was high during this period because of anthropogenic activities. Carbon Monoxide (CO) was also high between the two 1-month maps. The 2020 and 2021 results indicate high AQI change, whereas 2018 and 2019 indicate low AQI throughout the year. Kolkata has seven AQI monitoring stations, where high nitrogen dioxide was recorded at 102 (2018), 48 (2019), 26 (2020) and 98 (2021), while Delhi AQI stations recorded 99 (2018), 49 (2019), 37 (2020), and 107 (2021). Delhi, Kolkata, Mumbai, Pune, and Chennai recorded large fluctuations of air pollutants during the study periods, with ~ 50-60% NO2 recorded as high in recent times. The AOD was high in Uttar Pradesh in 2020.
These results indicate that investigation of air pollutants is necessary for future planning and management; otherwise, our planet will be increasingly affected by anthropogenic activities and climatic conditions, potentially to the point where life cannot be sustained.}, } @article {pmid37192819, year = {2023}, author = {Ahalt, S and Avillach, P and Boyles, R and Bradford, K and Cox, S and Davis-Dusenbery, B and Grossman, RL and Krishnamurthy, A and Manning, A and Paten, B and Philippakis, A and Borecki, I and Chen, SH and Kaltman, J and Ladwa, S and Schwartz, C and Thomson, A and Davis, S and Leaf, A and Lyons, J and Sheets, E and Bis, JC and Conomos, M and Culotti, A and Desain, T and Digiovanna, J and Domazet, M and Gogarten, S and Gutierrez-Sacristan, A and Harris, T and Heavner, B and Jain, D and O'Connor, B and Osborn, K and Pillion, D and Pleiness, J and Rice, K and Rupp, G and Serret-Larmande, A and Smith, A and Stedman, JP and Stilp, A and Barsanti, T and Cheadle, J and Erdmann, C and Farlow, B and Gartland-Gray, A and Hayes, J and Hiles, H and Kerr, P and Lenhardt, C and Madden, T and Mieczkowska, JO and Miller, A and Patton, P and Rathbun, M and Suber, S and Asare, J}, title = {Building a collaborative cloud platform to accelerate heart, lung, blood, and sleep research.}, journal = {Journal of the American Medical Informatics Association : JAMIA}, volume = {30}, number = {7}, pages = {1293-1300}, pmid = {37192819}, issn = {1527-974X}, support = {OT3 HL142478/HL/NHLBI NIH HHS/United States ; R01 HL120393/HL/NHLBI NIH HHS/United States ; U01 HL120393/HL/NHLBI NIH HHS/United States ; OT3 HL142481/HL/NHLBI NIH HHS/United States ; OT3 HL147154/HL/NHLBI NIH HHS/United States ; HHSN268201800001C/HL/NHLBI NIH HHS/United States ; OT3 HL142480/HL/NHLBI NIH HHS/United States ; OT3 HL142479/HL/NHLBI NIH HHS/United States ; HHSN268201000001I/HL/NHLBI NIH HHS/United States ; }, mesh = {Humans ; *Cloud Computing ; Ecosystem ; Reproducibility of Results ; *COVID-19 ; Lung ; Software ; }, abstract = {Research increasingly relies on interrogating large-scale data resources. The NIH National Heart, Lung, and Blood Institute developed the NHLBI BioData CatalystⓇ (BDC), a community-driven ecosystem where researchers, including bench and clinical scientists, statisticians, and algorithm developers, find, access, share, store, and compute on large-scale datasets. This ecosystem provides secure, cloud-based workspaces, user authentication and authorization, search, tools and workflows, applications, and new innovative features to address community needs, including exploratory data analysis, genomic and imaging tools, tools for reproducibility, and improved interoperability with other NIH data science platforms. BDC offers straightforward access to large-scale datasets and computational resources that support precision medicine for heart, lung, blood, and sleep conditions, leveraging separately developed and managed platforms to maximize flexibility based on researcher needs, expertise, and backgrounds. Through the NHLBI BioData Catalyst Fellows Program, BDC facilitates scientific discoveries and technological advances.
BDC also facilitated accelerated research on the coronavirus disease-2019 (COVID-19) pandemic.}, } @article {pmid37190404, year = {2023}, author = {Li, J and Fan, Y and Bian, X and Yuan, Q}, title = {Online/Offline MA-CP-ABE with Cryptographic Reverse Firewalls for IoT.}, journal = {Entropy (Basel, Switzerland)}, volume = {25}, number = {4}, pages = {}, pmid = {37190404}, issn = {1099-4300}, support = {61872204//National Natural Science Foundation of China/ ; LH2020F050//Natural Science Foundation of Heilongjiang Province of China/ ; 2021-KYYWF-0016//Fundamental Research Funds for Heilongjiang Universities of China/ ; 135309453//Science Research project of Basic scientific research business expenses in Heilongjiang Provincical colleges and universities of China/ ; }, abstract = {Devices in the Internet of Things (IoT) usually use cloud storage and cloud computing to save storage and computing cost. Therefore, the efficient realization of one-to-many communication of data on the premise of ensuring the security of cloud storage data is a challenge. Ciphertext-Policy Attribute-Based Encryption (CP-ABE) can not only protect the security of data in the cloud and achieve one-to-many communication but also achieve fine-grained access control for data. However, the single-authority CP-ABE faces the crisis of single point of failure. In order to improve security, the Multi-Authority CP-ABE (MA-CP-ABE) is adopted. Although there are provably-secure MA-CP-ABE schemes, Edward Snowden's research shows that provably-secure cryptographic schemes are vulnerable to backdoor attacks, resulting in secret disclosure, and thus threatening security. In addition, ABE requires huge computational overhead in key generation, encryption and decryption, which increase with the increase in the number of attributes and the complexity of the access structure, and there are a large number of resource-constrained devices in the IoT. To mitigate this issue, we construct the Online/Offline MA-CP-ABE with Cryptographic Reverse Firewalls (OO-MA-CP-ABE-CRFs) scheme. This scheme not only uses Cryptographic Reverse Firewall (CRF) to resist backdoor attacks but also uses online/offline key generation, online/offline encryption and outsourcing encryption technology to optimize the efficiency of the MA-CP-ABE scheme with reverse firewall, reducing the storage and computing cost of users. Finally, the security of the OO-MA-CP-ABE-CRFs scheme is proved, and the experimental results indicate that the scheme is efficient and practical.}, } @article {pmid37190351, year = {2023}, author = {Panchikkil, S and Manikandan, VM and Zhang, Y and Wang, S}, title = {A Multi-Directional Pixel-Swapping Approach (MPSA) for Entropy-Retained Reversible Data Hiding in Encrypted Images.}, journal = {Entropy (Basel, Switzerland)}, volume = {25}, number = {4}, pages = {}, pmid = {37190351}, issn = {1099-4300}, support = {MC_PC_17171/MRC_/Medical Research Council/United Kingdom ; }, abstract = {Reversible data hiding (RDH), a promising data-hiding technique, is widely examined in domains such as medical image transmission, satellite image transmission, crime investigation, cloud computing, etc. None of the existing RDH schemes addresses a solution from a real-time aspect. A good compromise between the information embedding rate and computational time makes the scheme suitable for real-time applications. As a solution, we propose a novel RDH scheme that recovers the original image by retaining its quality and extracting the hidden data. 
Here, the cover image gets encrypted using a stream cipher and is partitioned into non-overlapping blocks. Secret information is inserted into the encrypted blocks of the cover image via a controlled local pixel-swapping approach to achieve a comparatively good payload. The new scheme MPSA allows the data hider to hide two bits in every encrypted block. The existing reversible data-hiding schemes modify the encrypted image pixels leading to a compromise in image security. However, the proposed work complements the support of encrypted image security by maintaining the same entropy of the encrypted image in spite of hiding the data. Experimental results illustrate the competency of the proposed work accounting for various parameters, including embedding rate and computational time.}, } @article {pmid37187785, year = {2023}, author = {Cabrero-Holgueras, J and Pastrana, S}, title = {Towards realistic privacy-preserving deep learning over encrypted medical data.}, journal = {Frontiers in cardiovascular medicine}, volume = {10}, number = {}, pages = {1117360}, pmid = {37187785}, issn = {2297-055X}, abstract = {Cardiovascular disease supposes a substantial fraction of healthcare systems. The invisible nature of these pathologies demands solutions that enable remote monitoring and tracking. Deep Learning (DL) has arisen as a solution in many fields, and in healthcare, multiple successful applications exist for image enhancement and health outside hospitals. However, the computational requirements and the need for large-scale datasets limit DL. Thus, we often offload computation onto server infrastructure, and various Machine-Learning-as-a-Service (MLaaS) platforms emerged from this need. These enable the conduction of heavy computations in a cloud infrastructure, usually equipped with high-performance computing servers. Unfortunately, the technical barriers persist in healthcare ecosystems since sending sensitive data (e.g., medical records or personally identifiable information) to third-party servers involves privacy and security concerns with legal and ethical implications. In the scope of Deep Learning for Healthcare to improve cardiovascular health, Homomorphic Encryption (HE) is a promising tool to enable secure, private, and legal health outside hospitals. Homomorphic Encryption allows for privacy-preserving computations over encrypted data, thus preserving the privacy of the processed information. Efficient HE requires structural optimizations to perform the complex computation of the internal layers. One such optimization is Packed Homomorphic Encryption (PHE), which encodes multiple elements on a single ciphertext, allowing for efficient Single Instruction over Multiple Data (SIMD) operations. However, using PHE in DL circuits is not straightforward, and it demands new algorithms and data encoding, which existing literature has not adequately addressed. To fill this gap, in this work, we elaborate on novel algorithms to adapt the linear algebra operations of DL layers to PHE. Concretely, we focus on Convolutional Neural Networks. We provide detailed descriptions and insights into the different algorithms and efficient inter-layer data format conversion mechanisms. We formally analyze the complexity of the algorithms in terms of performance metrics and provide guidelines and recommendations for adapting architectures that deal with private data. Furthermore, we confirm the theoretical analysis with practical experimentation. 
Among other conclusions, we prove that our new algorithms speed up the processing of convolutional layers compared to the existing proposals.}, } @article {pmid37181330, year = {2023}, author = {Dahlquist, JM and Nelson, SC and Fullerton, SM}, title = {Cloud-based biomedical data storage and analysis for genomic research: Landscape analysis of data governance in emerging NIH-supported platforms.}, journal = {HGG advances}, volume = {4}, number = {3}, pages = {100196}, pmid = {37181330}, issn = {2666-2477}, support = {R21 HG011501/HG/NHGRI NIH HHS/United States ; }, mesh = {Humans ; *Cloud Computing ; *Population Health ; Genomics/methods ; Genome ; Information Storage and Retrieval ; }, abstract = {The storage, sharing, and analysis of genomic data poses technical and logistical challenges that have precipitated the development of cloud-based computing platforms designed to facilitate collaboration and maximize the scientific utility of data. To understand cloud platforms' policies and procedures and the implications for different stakeholder groups, in summer 2021, we reviewed publicly available documents (N = 94) sourced from platform websites, scientific literature, and lay media for five NIH-funded cloud platforms (the All of Us Research Hub, NHGRI AnVIL, NHLBI BioData Catalyst, NCI Genomic Data Commons, and the Kids First Data Resource Center) and a pre-existing data sharing mechanism, dbGaP. Platform policies were compared across seven categories of data governance: data submission, data ingestion, user authentication and authorization, data security, data access, auditing, and sanctions. Our analysis finds similarities across the platforms, including reliance on a formal data ingestion process, multiple tiers of data access with varying user authentication and/or authorization requirements, platform and user data security measures, and auditing for inappropriate data use. Platforms differ in how data tiers are organized, as well as the specifics of user authentication and authorization across access tiers. Our analysis maps elements of data governance across emerging NIH-funded cloud platforms and as such provides a key resource for stakeholders seeking to understand and utilize data access and analysis options across platforms and to surface aspects of governance that may require harmonization to achieve the desired interoperability.}, } @article {pmid37177753, year = {2023}, author = {Cicceri, G and Tricomi, G and D'Agati, L and Longo, F and Merlino, G and Puliafito, A}, title = {A Deep Learning-Driven Self-Conscious Distributed Cyber-Physical System for Renewable Energy Communities.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {9}, pages = {}, pmid = {37177753}, issn = {1424-8220}, abstract = {The Internet of Things (IoT) is transforming various domains, including smart energy management, by enabling the integration of complex digital and physical components in distributed cyber-physical systems (DCPSs). The design of DCPSs has so far been focused on performance-related, non-functional requirements. However, with the growing power consumption and computation expenses, sustainability is becoming an important aspect to consider. This has led to the concept of energy-aware DCPSs, which integrate conventional non-functional requirements with additional attributes for sustainability, such as energy consumption. 
This research activity aimed to investigate and develop energy-aware architectural models and edge/cloud computing technologies to design next-generation, AI-enabled (and, specifically, deep-learning-enhanced), self-conscious IoT-extended DCPSs. Our key contributions include energy-aware edge-to-cloud architectural models and technologies, the orchestration of a (possibly federated) edge-to-cloud infrastructure, abstractions and unified models for distributed heterogeneous virtualized resources, innovative machine learning algorithms for the dynamic reallocation and reconfiguration of energy resources, and the management of energy communities. The proposed solution was validated through case studies on optimizing renewable energy communities (RECs), or energy-aware DCPSs, which are particularly challenging due to their unique requirements and constraints; in more detail, in this work, we aim to define the optimal implementation of an energy-aware DCPS. Moreover, smart grids play a crucial role in developing energy-aware DCPSs, providing a flexible and efficient power system integrating renewable energy sources, microgrids, and other distributed energy resources. The proposed energy-aware DCPSs contribute to the development of smart grids by providing a sustainable, self-consistent, and efficient way to manage energy distribution and consumption. The performance demonstrates our approach's effectiveness for consumption and production (based on RMSE and MAE metrics). Our research supports the transition towards a more sustainable future, where communities adopting REC principles become key players in the energy landscape.}, } @article {pmid37177697, year = {2023}, author = {Mamede, H and Neves, JC and Martins, J and Gonçalves, R and Branco, F}, title = {A Prototype for an Intelligent Water Management System for Household Use.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {9}, pages = {}, pmid = {37177697}, issn = {1424-8220}, support = {LA/P/0063/2020//Fundação para a Ciência e Tecnologia/ ; }, abstract = {Water scarcity is becoming an issue of more significant concern with a major impact on global sustainability. For it, new measures and approaches are urgently needed. Digital technologies and tools can play an essential role in improving the effectiveness and efficiency of current water management approaches. Therefore, a solution is proposed and validated, given the limited presence of models or technological architectures in the literature to support intelligent water management systems for domestic use. It is based on a layered architecture, fully designed to meet the needs of households and to do so through the adoption of technologies such as the Internet of Things and cloud computing. By developing a prototype and using it as a use case for testing purposes, we have concluded the positive impact of using such a solution. 
As this is a first contribution towards overcoming the problem, some issues will be addressed in future work, namely data and device security and energy and traffic optimisation, among several others.}, } @article {pmid37177672, year = {2023}, author = {Micko, K and Papcun, P and Zolotova, I}, title = {Review of IoT Sensor Systems Used for Monitoring the Road Infrastructure.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {9}, pages = {}, pmid = {37177672}, issn = {1424-8220}, support = {APVV-20-0247//Slovak Research and Development Agency/ ; }, abstract = {An intelligent transportation system is one of the fundamental goals of the smart city concept. The Internet of Things (IoT) concept is a basic instrument to digitalize and automatize the process in the intelligent transportation system. Digitalization via the IoT concept enables the automatic collection of data usable for management in the transportation system. The IoT concept includes a system of sensors, actuators, control units and computational distribution among the edge, fog and cloud layers. The study proposes a taxonomy of sensors used for monitoring tasks based on motion detection and object tracking in intelligent transportation systems. The taxonomy helps to categorize the sensors based on working principles, installation or maintenance methods, and other categories. This categorization enables us to compare the effectiveness of each sensor system. Monitoring tasks are analyzed, categorized, and solved in intelligent transportation systems based on a literature review and focusing on motion detection and object tracking methods. A literature survey of sensor systems used for monitoring tasks in the intelligent transportation system was performed according to sensor and monitoring task categorization. In this review, we analyzed the achieved results to measure, sense, or classify events in intelligent transportation system monitoring tasks. The review conclusions were used to propose an architecture for a universal sensor system for common monitoring tasks based on motion detection and object tracking methods in intelligent transportation tasks. The proposed architecture was built and tested to obtain first experimental results in a case study scenario. Finally, we propose methods that could significantly improve the results in future research.}, } @article {pmid37177663, year = {2023}, author = {Esposito, M and Belli, A and Palma, L and Pierleoni, P}, title = {Design and Implementation of a Framework for Smart Home Automation Based on Cellular IoT, MQTT, and Serverless Functions.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {9}, pages = {}, pmid = {37177663}, issn = {1424-8220}, abstract = {Smart objects and home automation tools are becoming increasingly popular, and the number of smart devices that each dedicated application has to manage is increasing accordingly. The emergence of technologies such as serverless computing and dedicated machine-to-machine communication protocols represents a valuable opportunity to facilitate management of smart objects and replicability of new solutions. The aim of this paper is to propose a framework for home automation applications that can be applied to control and monitor any appliance or object in a smart home environment. The proposed framework makes use of a dedicated message-exchange protocol based on MQTT and cloud-deployed serverless functions.
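To make the message-exchange pattern concrete, the following is a minimal illustrative sketch (not code from the paper) of the kind of MQTT publish/subscribe exchange such a framework could build on, written against the paho-mqtt 1.x client API; the broker host and topic names are hypothetical:

# Minimal sketch of an MQTT command/status exchange for a smart-home object.
# Assumes the paho-mqtt package (1.x-style callbacks); broker and topics are hypothetical.
import paho.mqtt.client as mqtt

BROKER = "broker.example.com"             # hypothetical cloud broker
COMMAND_TOPIC = "home/kitchen/fan/cmd"    # commands sent to the smart object
STATUS_TOPIC = "home/kitchen/fan/status"  # status reported by the smart object

def on_connect(client, userdata, flags, rc):
    # Subscribe to device status updates once the connection is established.
    client.subscribe(STATUS_TOPIC)

def on_message(client, userdata, msg):
    # In an architecture like the one described above, a cloud-deployed
    # serverless function could be triggered by messages like this one.
    print(f"{msg.topic}: {msg.payload.decode()}")

client = mqtt.Client()
client.on_connect = on_connect
client.on_message = on_message
client.connect(BROKER, 1883, keepalive=60)
client.publish(COMMAND_TOPIC, "speed=2")  # issue a command to the smart object
client.loop_forever()                     # process network traffic and callbacks

A broker-side rule or serverless function subscribed to the same topics would then translate such messages into device actions or notifications.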
Furthermore, a vocal command interface is implemented to let users control the smart object with vocal interactions, greatly increasing the accessibility and intuitiveness of the proposed solution. A smart object, namely a smart kitchen fan extractor system, was developed, prototyped, and tested to illustrate the viability of the proposed solution. The smart object is equipped with a narrowband IoT (NB-IoT) module to send and receive commands to and from the cloud. In order to evaluate the performance of the proposed solution, the suitability of NB-IoT for the transmission of MQTT messages was assessed. The results show that NB-IoT achieves acceptable latency despite minimal packet loss.}, } @article {pmid37177654, year = {2023}, author = {Carvalho, J and Vieira, D and Rodrigues, C and Trinta, F}, title = {LM[2]K Model for Hosting an Application Based on Microservices in Multi-Cloud.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {9}, pages = {}, pmid = {37177654}, issn = {1424-8220}, abstract = {Cloud computing has become a popular service delivery model, offering several advantages. However, there are still challenges that need to be addressed when applying the cloud model to specific scenarios. Two such challenges involve deploying and executing applications across multiple providers, each comprising several services with similar functionalities and different capabilities. Therefore, dealing with application distributions across various providers can be a complex task for a software architect due to the differing characteristics of the application components. Some works have proposed solutions to address the challenges discussed here, but most of them focus on service providers. To facilitate the decision-making process of software architects, we previously presented PacificClouds, an architecture for managing the deployment and execution of applications based on microservices and distributed in a multi-cloud environment. In this work, we therefore focus on the challenges of selecting multiple clouds for PacificClouds and choosing providers that best meet the microservices and software architect requirements. We propose a selection model and three approaches to address various scenarios. We evaluate the performance of the approaches and conduct a comparative analysis of them. The results demonstrate their feasibility in terms of performance.}, } @article {pmid37177653, year = {2023}, author = {Tošić, A and Vičič, J and Burnard, M and Mrissa, M}, title = {A Blockchain Protocol for Real-Time Application Migration on the Edge.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {9}, pages = {}, pmid = {37177653}, issn = {1424-8220}, support = {J2-2504//Slovenian Research Agency/ ; 739574//European Commission/ ; 857188//European Commission/ ; }, abstract = {The Internet of Things (IoT) is experiencing widespread adoption across industry sectors ranging from supply chain management to smart cities, buildings, and health monitoring. However, most software architectures for IoT deployment rely on centralized cloud computing infrastructures to provide storage and computing power, as cloud providers have high economic incentives to organize their infrastructure into clusters. Despite these incentives, there has been a recent shift from centralized to decentralized architectures that harness the potential of edge devices, reduce network latency, and lower infrastructure costs to support IoT applications.
This shift has resulted in new edge computing architectures, but many still rely on centralized solutions for managing applications. A truly decentralized approach would offer interesting properties required for IoT use cases. In this paper, we introduce a decentralized architecture tailored for large-scale deployments of peer-to-peer IoT sensor networks and capable of run-time application migration. We propose a leader election consensus protocol for permissioned distributed networks that requires only one series of messages to commit to a change. The solution combines a blockchain consensus protocol with Verifiable Delay Functions (VDFs) to achieve decentralized randomness, fault tolerance, transparency, and the absence of a single point of failure. We validate our solution by testing and analyzing the performance of our reference implementation. Our results show that nodes are able to reach consensus consistently, and the VDF proofs can be used as an entropy pool for decentralized randomness. We show that our system can perform autonomous real-time application migrations. Finally, we conclude that the implementation is scalable by testing it on 100 consensus nodes running 200 applications.}, } @article {pmid37177615, year = {2023}, author = {Vergara, J and Botero, J and Fletscher, L}, title = {A Comprehensive Survey on Resource Allocation Strategies in Fog/Cloud Environments.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {9}, pages = {}, pmid = {37177615}, issn = {1424-8220}, abstract = {The growing number of connected objects has allowed the development of new applications in different areas. In addition, the technologies that support these applications, such as cloud and fog computing, face challenges in providing the necessary resources to process information for different applications due to the highly dynamic nature of these networks and the many heterogeneous devices involved. This article reviews the existing literature on one of these challenges: resource allocation in the fog-cloud continuum, including approaches that consider different strategies and network characteristics. We also discuss the factors influencing resource allocation decisions, such as energy consumption, latency, monetary cost, or network usage. Finally, we identify the open research challenges and highlight potential future directions. This survey article aims to serve as a valuable reference for researchers and practitioners interested in the field of edge computing and resource allocation.}, } @article {pmid37177587, year = {2023}, author = {Ma, H and Zhou, D and Li, P and Wang, X}, title = {EVOAC-HP: An Efficient and Verifiable Outsourced Access Control Scheme with Hidden Policy.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {9}, pages = {}, pmid = {37177587}, issn = {1424-8220}, support = {61932010//the National Natural Science Foundation of China/ ; GPKLPSNS-2022-KF-05//Guangdong Provincial Key Laboratory of Power System Network Security/ ; }, abstract = {As medical data become increasingly important in healthcare, it is crucial to have proper access control mechanisms, ensuring that sensitive data are only accessible to authorized users while maintaining privacy and security.
Ciphertext-Policy Attribute-Based Encryption (CP-ABE) is an attractive access control solution that can offer effective, fine-grained and secure medical data sharing, but it has two major drawbacks. Firstly, decryption is computationally expensive for resource-limited data users, especially when the access policy has many attributes, limiting its use in large-scale data-sharing scenarios. Secondly, existing schemes are based on data users' attributes, which can potentially reveal sensitive information about the users, especially in healthcare data sharing, where strong privacy and security are essential. To address these issues, we designed EVOAC-HP, an improved CP-ABE scheme that provides efficient and verifiable outsourced access control with a fully hidden policy. In this paper, we utilize an attribute Bloom filter to achieve policy hiding without revealing user privacy. To alleviate the decryption burden on data users, we also adopt outsourced decryption, offloading the heavy computation overhead to the cloud service provider (CSP), which has strong computing and storage capabilities, while the transformed ciphertext results can be verified by the data user. Finally, with rigorous security and reliable performance analysis, we demonstrate that EVOAC-HP is both practical and effective with robust privacy protection.}, } @article {pmid37177514, year = {2023}, author = {Dias, J and Simões, P and Soares, N and Costa, CM and Petry, MR and Veiga, G and Rocha, LF}, title = {Comparison of 3D Sensors for Automating Bolt-Tightening Operations in the Automotive Industry.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {9}, pages = {}, pmid = {37177514}, issn = {1424-8220}, support = {NORTE-01-0247-FEDER-072550//This work has received funding from the ERDF - European Regional Development Fund, through the North Portugal Regional Operational - NORTE 2020 programme under the Portugal 2020 Partnership Agreement within project EuroBot./ ; }, abstract = {Machine vision systems are widely used in assembly lines for providing sensing abilities to robots to allow them to handle dynamic environments. This paper presents a comparison of 3D sensors for evaluating which one is best suited for usage in a machine vision system for robotic fastening operations within an automotive assembly line. The perception system is necessary for taking into account the position uncertainty that arises from the vehicles being transported in an aerial conveyor. Three sensors with different working principles were compared, namely laser triangulation (SICK TriSpector1030), structured light with sequential stripe patterns (Photoneo PhoXi S) and structured light with infrared speckle pattern (Asus Xtion Pro Live). The accuracy of the sensors was measured by computing the root mean square error (RMSE) of the point cloud registrations between their scans and two types of reference point clouds, namely, CAD files and 3D sensor scans. Overall, the RMSE was lower when using sensor scans, with the SICK TriSpector1030 achieving the best results (0.25 mm ± 0.03 mm), the Photoneo PhoXi S having intermediate performance (0.49 mm ± 0.14 mm) and the Asus Xtion Pro Live obtaining the highest RMSE (1.01 mm ± 0.11 mm).
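For readers who want to reproduce this style of evaluation, the following is a small illustrative sketch (not the authors' pipeline) of computing the RMSE of a registered scan against a reference point cloud using nearest-neighbour correspondences; the synthetic data and noise level are assumptions:

# Sketch: RMSE of a point-cloud registration against a reference cloud via
# nearest-neighbour correspondences. Data here are synthetic and illustrative.
import numpy as np
from scipy.spatial import cKDTree

def registration_rmse(registered: np.ndarray, reference: np.ndarray) -> float:
    """registered: (N, 3) XYZ points after registration; reference: (M, 3) points."""
    tree = cKDTree(reference)
    distances, _ = tree.query(registered)  # nearest-neighbour distance per point
    return float(np.sqrt(np.mean(distances ** 2)))

rng = np.random.default_rng(0)
reference = rng.uniform(0.0, 100.0, size=(10_000, 3))                # millimetres
registered = reference + rng.normal(0.0, 0.25, size=reference.shape)  # noisy scan
print(f"RMSE: {registration_rmse(registered, reference):.3f} mm")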
Considering the use case requirements, the final machine vision system relied on the SICK TriSpector1030 sensor and was integrated with a collaborative robot, which was successfully deployed in a vehicle assembly line, achieving 94% success in 53,400 screwing operations.}, } @article {pmid37177511, year = {2023}, author = {Li, X and Yi, Z and Li, R and Wang, XA and Li, H and Yang, X}, title = {SM2-Based Offline/Online Efficient Data Integrity Verification Scheme for Multiple Application Scenarios.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {9}, pages = {}, pmid = {37177511}, issn = {1424-8220}, support = {[62172436]//National Natural Science Foundation of China/ ; [62102452]//National Natural Science Foundation of China/ ; }, abstract = {With the rapid development of cloud storage and cloud computing technology, users tend to store data in the cloud for more convenient services. In order to ensure the integrity of cloud data, scholars have proposed cloud data integrity verification schemes to protect users' data security. Internet of Things storage environments, for big data and medical big data in particular, create a stronger demand for data integrity verification schemes, while also requiring such schemes to be more comprehensive. Existing data integrity verification schemes are mostly applied in cloud storage environments and cannot be successfully applied to Internet of Things environments in the context of big data and medical big data storage. To solve this problem, we combined the characteristics and requirements of Internet of Things data storage and medical data storage and designed an efficient SM2-based offline/online data integrity verification scheme. The resulting scheme uses the SM4 block cipher to protect the privacy of the data content and uses a dynamic hash table to realize the dynamic updating of data. Based on the SM2 signature algorithm, the scheme can also realize offline tag generation and batch audits, reducing the computational burden of users. Security proofs and efficiency analysis show the scheme to be safe and efficient, and it can be used in a variety of application scenarios.}, } @article {pmid37177424, year = {2023}, author = {Hadjkouider, AM and Kerrache, CA and Korichi, A and Sahraoui, Y and Calafate, CT}, title = {Stackelberg Game Approach for Service Selection in UAV Networks.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {9}, pages = {}, pmid = {37177424}, issn = {1424-8220}, abstract = {Nowadays, mobile devices are expected to perform a growing number of tasks, whose complexity is also increasing significantly. However, despite great technological improvements in the last decade, such devices still have limitations in terms of processing power and battery lifetime. In this context, mobile edge computing (MEC) emerges as a possible solution to address such limitations, being able to provide on-demand services to the customer and bringing several services published in the cloud closer, with reduced cost and fewer security concerns. On the other hand, Unmanned Aerial Vehicle (UAV) networking emerged as a paradigm offering flexible services, new ephemeral applications such as safety and disaster management, mobile crowd-sensing, and fast delivery, to name a few. However, to efficiently use these services, discovery and selection strategies must be taken into account.
In this context, discovering the services made available by a UAV-MEC network, and selecting the best services among those available in a timely and efficient manner, can become a challenging task. To face these issues, game-theoretic methods that suit the case of UAV-MEC services have been proposed in the literature, modeling this challenge as a Stackelberg game and using existing approaches to solve the game for efficient service discovery and selection. Hence, the goal of this paper is to propose Stackelberg-game-based solutions for service discovery and selection in the context of UAV-based mobile edge computing. Simulation results obtained using the NS-3 simulator highlight the efficiency of our proposed game in terms of price and QoS metrics.}, } @article {pmid37177402, year = {2023}, author = {Dang, VA and Vu Khanh, Q and Nguyen, VH and Nguyen, T and Nguyen, DC}, title = {Intelligent Healthcare: Integration of Emerging Technologies and Internet of Things for Humanity.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {9}, pages = {}, pmid = {37177402}, issn = {1424-8220}, mesh = {Humans ; *Internet of Things ; Internet ; Gold ; Intelligence ; Delivery of Health Care ; }, abstract = {Health is gold, and good health is a matter of survival for humanity. The development of the healthcare industry aligns with the development of humans throughout history. Nowadays, along with the strong growth of science and technology, the medical domain in general and the healthcare industry have achieved many breakthroughs, such as remote medical examination and treatment applications, pandemic prediction, and remote patient health monitoring. The advent of 5th generation communication networks in the early 2020s led to the Internet of Things concept. Moreover, the 6th generation communication networks (so-called 6G), expected to launch in 2030, will be the next revolution of the IoT era, and will include autonomous IoT systems and form a series of endogenous intelligent applications that serve humanity. One of the domains that receives the most attention is smart healthcare. In this study, we conduct a comprehensive survey of IoT-based technologies and solutions in the medical field. Then, we propose an all-in-one computing architecture for real-time IoHT applications and present possible solutions for achieving the proposed architecture. Finally, we discuss challenges, open issues, and future research directions. We hope that the results of this study will serve as essential guidelines for further research in the human healthcare domain.}, } @article {pmid37172382, year = {2023}, author = {Feraud, M and O'Brien, JW and Samanipour, S and Dewapriya, P and van Herwerden, D and Kaserzon, S and Wood, I and Rauert, C and Thomas, KV}, title = {InSpectra - A platform for identifying emerging chemical threats.}, journal = {Journal of hazardous materials}, volume = {455}, number = {}, pages = {131486}, doi = {10.1016/j.jhazmat.2023.131486}, pmid = {37172382}, issn = {1873-3336}, abstract = {Non-target analysis (NTA) employing high-resolution mass spectrometry (HRMS) coupled with liquid chromatography is increasingly being used to identify chemicals of biological relevance. HRMS datasets are large and complex, making the identification of potentially relevant chemicals extremely challenging.
As they are recorded in vendor-specific formats, interpreting them is often reliant on vendor-specific software that may not accommodate advancements in data processing. Here we present InSpectra, a vendor-independent, automated platform for the systematic detection of newly identified emerging chemical threats. InSpectra is web-based, open-source/access, and modular, providing highly flexible and extensible NTA and suspect screening workflows. As a cloud-based platform, InSpectra exploits parallel computing and big data archiving capabilities with a focus on sharing and community curation of HRMS data. InSpectra offers a reproducible and transparent approach for the identification, tracking and prioritisation of emerging chemical threats.}, } @article {pmid37172351, year = {2023}, author = {Zerouali, B and Santos, CAG and do Nascimento, TVM and Silva, RMD}, title = {A cloud-integrated GIS for forest cover loss and land use change monitoring using statistical methods and geospatial technology over northern Algeria.}, journal = {Journal of environmental management}, volume = {341}, number = {}, pages = {118029}, doi = {10.1016/j.jenvman.2023.118029}, pmid = {37172351}, issn = {1095-8630}, mesh = {Humans ; *Geographic Information Systems ; *Conservation of Natural Resources/methods ; Algeria ; Agriculture ; Environmental Monitoring/methods ; Technology ; }, abstract = {Over the last two decades, forest cover has experienced significant impacts from fires and deforestation worldwide due to direct human activities and climate change. This paper assesses trends in forest cover loss and land use and land cover changes in northern Algeria between 2000 and 2020 using datasets extracted from Google Earth Engine (GEE), such as the Hansen Global Forest Change and MODIS Land Cover Type products (MCD12Q1). Classification was performed using the pixel-based supervised machine-learning algorithm called Random Forest (RF). Trends were analyzed using methods such as the Mann-Kendall test and Sen's slope estimator. The study area comprises 17 basins with high rainfall variability. The results indicated that the forest area decreased by 64.96%, from 3718 to 1266 km[2], during the 2000-2020 period, while the barren area increased by 40%, from 134,777 to 188,748 km[2]. The findings revealed that the Constantinois-Seybousse-Mellegue hydrographic basin was the most affected by deforestation and cover loss, exceeding 50% (with an area of 1018 km[2]), while the Seybouse River basin experienced the highest percentage of cover loss at 40%. Nonparametric tests showed that seven river basins (41%) had significantly increasing trends of forest cover loss. According to the obtained results, the forest loss situation in Algeria, especially in the northeastern part, is very alarming and requires an exceptional and urgent plan to protect forests and the ecological system against wildfires and climate change.
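As background for the trend analysis mentioned above, the sketch below implements the standard Mann-Kendall test (without tie correction) and Sen's slope estimator on an illustrative annual series; the numbers are invented and are not the paper's data:

# Mann-Kendall trend test (no tie correction) and Sen's slope estimator.
# The annual forest-cover-loss series below is illustrative only.
import numpy as np
from scipy.stats import norm

def mann_kendall(x):
    x = np.asarray(x, dtype=float)
    n = len(x)
    # S statistic: sum of sign(x_j - x_i) over all pairs i < j.
    s = sum(np.sign(x[j] - x[i]) for i in range(n - 1) for j in range(i + 1, n))
    var_s = n * (n - 1) * (2 * n + 5) / 18.0  # variance of S without ties
    if s > 0:
        z = (s - 1) / np.sqrt(var_s)
    elif s < 0:
        z = (s + 1) / np.sqrt(var_s)
    else:
        z = 0.0
    p = 2.0 * (1.0 - norm.cdf(abs(z)))        # two-sided p-value
    return s, z, p

def sens_slope(x):
    x = np.asarray(x, dtype=float)
    slopes = [(x[j] - x[i]) / (j - i)
              for i in range(len(x) - 1) for j in range(i + 1, len(x))]
    return float(np.median(slopes))           # median of all pairwise slopes

loss = [12.0, 15.0, 14.0, 18.0, 21.0, 19.0, 25.0, 27.0]  # km2/year, invented
s, z, p = mann_kendall(loss)
print(f"S={s:.0f}, Z={z:.2f}, p={p:.3f}, Sen's slope={sens_slope(loss):.2f} km2/yr")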
The study provides a diagnosis that should encourage better protection and management of forest cover in Algeria.}, } @article {pmid37169026, year = {2022}, author = {Mirzadeh, SI and Arefeen, A and Ardo, J and Fallahzadeh, R and Minor, B and Lee, JA and Hildebrand, JA and Cook, D and Ghasemzadeh, H and Evangelista, LS}, title = {Use of machine learning to predict medication adherence in individuals at risk for atherosclerotic cardiovascular disease.}, journal = {Smart health (Amsterdam, Netherlands)}, volume = {26}, number = {}, pages = {}, pmid = {37169026}, issn = {2352-6483}, support = {R21 AG053162/AG/NIA NIH HHS/United States ; R21 NR015410/NR/NINR NIH HHS/United States ; }, abstract = {BACKGROUND: Medication nonadherence is a critical problem with severe implications in individuals at risk for atherosclerotic cardiovascular disease. Many studies have attempted to predict medication adherence in this population, but few, if any, have been effective in prediction, suggesting that essential risk factors remain unidentified.

OBJECTIVE: This study's objective was to (1) establish an accurate prediction model of medication adherence in individuals at risk for atherosclerotic cardiovascular disease and (2) identify significant contributing factors to the predictive accuracy of medication adherence. In particular, we aimed to use only the baseline questionnaire data to assess medication adherence prediction feasibility.

METHODS: A sample of 40 individuals at risk for atherosclerotic cardiovascular disease was recruited for an eight-week feasibility study. After collecting baseline data, we recorded data from a pillbox that sent events to a cloud-based server. Health measures and medication use events were analyzed using machine learning algorithms to identify variables that best predict medication adherence.

RESULTS: Our adherence prediction model, based on only the ten most relevant variables, achieved an average error rate of 12.9%. Medication adherence was closely correlated with patients being encouraged to play an active role in their treatment, having confidence about what to do in an emergency, having knowledge about their medications, and having a special person in their life.

CONCLUSIONS: Our results showed the significance of clinical and psychosocial factors for predicting medication adherence in people at risk for atherosclerotic cardiovascular diseases. Clinicians and researchers can use these factors to stratify individuals to make evidence-based decisions to reduce the risks.}, } @article {pmid37168713, year = {2023}, author = {Shi, S and Jiang, Q and Jin, X and Wang, W and Liu, K and Chen, H and Liu, P and Zhou, W and Yao, S}, title = {A comparative analysis of near-infrared image colorization methods for low-power NVIDIA Jetson embedded systems.}, journal = {Frontiers in neurorobotics}, volume = {17}, number = {}, pages = {1143032}, pmid = {37168713}, issn = {1662-5218}, abstract = {The near-infrared (NIR) image obtained by an NIR camera is a grayscale image that is inconsistent with the human visual spectrum. It can be difficult to perceive the details of a scene from an NIR image; thus, a method is required to convert them to visible images, providing color and texture information. In addition, a camera produces so much video data that it increases the pressure on the cloud server. Image processing can be done on an edge device, but the computing resources of edge devices are limited, and their power consumption constraints need to be considered. Graphics Processing Unit (GPU)-based NVIDIA Jetson embedded systems offer a considerable advantage over Central Processing Unit (CPU)-based embedded devices in inference speed. For this study, we designed an evaluation system that uses image quality, resource occupancy, and energy consumption metrics to verify the performance of different NIR image colorization methods on low-power NVIDIA Jetson embedded systems for practical applications. The performance of 11 image colorization methods on NIR image datasets was tested on three different configurations of NVIDIA Jetson boards. The experimental results indicate that the Pix2Pix method performs best, with a rate of 27 frames per second on the Jetson Xavier NX. This performance is sufficient to meet the requirements of real-time NIR image colorization.}, } @article {pmid37168541, year = {2022}, author = {Ko, S and Zhou, H and Zhou, JJ and Won, JH}, title = {High-Performance Statistical Computing in the Computing Environments of the 2020s.}, journal = {Statistical science : a review journal of the Institute of Mathematical Statistics}, volume = {37}, number = {4}, pages = {494-518}, pmid = {37168541}, issn = {0883-4237}, support = {R25 HD108136/HD/NICHD NIH HHS/United States ; R01 HG006139/HG/NHGRI NIH HHS/United States ; R21 HL150374/HL/NHLBI NIH HHS/United States ; R35 GM141798/GM/NIGMS NIH HHS/United States ; K01 DK106116/DK/NIDDK NIH HHS/United States ; }, abstract = {Technological advances in the past decade, hardware and software alike, have made access to high-performance computing (HPC) easier than ever. We review these advances from a statistical computing perspective. Cloud computing makes access to supercomputers affordable. Deep learning software libraries make programming statistical algorithms easy and enable users to write code once and run it anywhere-from a laptop to a workstation with multiple graphics processing units (GPUs) or a supercomputer in a cloud. Highlighting how these developments benefit statisticians, we review recent optimization algorithms that are useful for high-dimensional models and can harness the power of HPC. Code snippets are provided to demonstrate the ease of programming.
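To illustrate the "write code once and run it anywhere" style described here (a toy sketch, not the paper's own code), the following PyTorch proximal-gradient solver for ℓ1-regularized least squares runs unchanged on a laptop CPU or a GPU by switching the device string:

# Toy sketch: proximal gradient descent for l1-regularized least squares.
# The same code runs on CPU or GPU; only the `device` string changes.
import torch

device = "cuda" if torch.cuda.is_available() else "cpu"
torch.manual_seed(0)

n, p, lam, step = 1000, 200, 0.1, 0.4
X = torch.randn(n, p, device=device)
beta_true = torch.zeros(p, device=device)
beta_true[:5] = 2.0                          # sparse ground truth
y = X @ beta_true + 0.1 * torch.randn(n, device=device)

beta = torch.zeros(p, device=device)
for _ in range(200):
    grad = X.T @ (X @ beta - y) / n          # gradient of the least-squares loss
    beta = beta - step * grad
    # Soft-thresholding: the proximal operator of the l1 penalty.
    beta = torch.sign(beta) * torch.clamp(beta.abs() - step * lam, min=0.0)

print(f"nonzero coefficients: {(beta.abs() > 1e-3).sum().item()} on {device}")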
We also provide an easy-to-use distributed matrix data structure suitable for HPC. Employing this data structure, we illustrate various statistical applications including large-scale positron emission tomography and ℓ1-regularized Cox regression. Our examples easily scale up to an 8-GPU workstation and a 720-CPU-core cluster in a cloud. As a case in point, we analyze the onset of type-2 diabetes from the UK Biobank with 200,000 subjects and about 500,000 single nucleotide polymorphisms using the HPC ℓ1-regularized Cox regression. Fitting this half-million-variate model takes less than 45 minutes and reconfirms known associations. To our knowledge, this is the first demonstration of the feasibility of penalized regression of survival outcomes at this scale.}, } @article {pmid37168441, year = {2023}, author = {Chatterjee, P and Bose, R and Banerjee, S and Roy, S}, title = {Enhancing Data Security of Cloud Based LMS.}, journal = {Wireless personal communications}, volume = {130}, number = {2}, pages = {1123-1139}, pmid = {37168441}, issn = {0929-6212}, abstract = {Around the world, the educational system is evolving. The new trend can be seen in traditional classroom systems as well as in digitalized systems. Cloud-based Learning Management Systems (LMS) will drive the educational industry forward in the coming years because they can provide end-users with a versatile, convenient, secure, and cost-effective learning process. The cloud-based LMS approach is the most effective and proper learning model in the worldwide educational sector, particularly if the organization is under strain owing to a global pandemic. It can be utilized over the internet with several users on the same platform. As a result, the initial requirement is important to enable the LMS model. Despite its many advantages, LMS confronts challenges such as confidentiality, user acceptance, and traffic. In a pandemic like COVID-19, the entire planet depends on a safe LMS platform to establish student and instructor trust. Therefore, this work attempts to describe an LMS model that may provide its users with optimal security, a user-friendly environment, and quick access. This paper discusses cloud attacks, as well as cryptographic and steganographic security models and techniques to address these issues. It also covers what kinds of security vulnerabilities and operations on cloud data are feasible, and how to deal with them using various algorithms.}, } @article {pmid37167613, year = {2023}, author = {Yang, SY and Oh, YH}, title = {Video-Assisted Versus Traditional Problem-Based Learning: A Quasi-Experimental Study Among Pediatric Nursing Students.}, journal = {The journal of nursing research : JNR}, volume = {31}, number = {3}, pages = {e277}, pmid = {37167613}, issn = {1948-965X}, mesh = {Humans ; Child ; *Problem-Based Learning/methods ; *Students, Nursing/psychology ; Learning ; Thinking ; Pediatric Nursing ; }, abstract = {BACKGROUND: The text-assisted, problem-based methods traditionally used to teach nursing students cannot adequately simulate holistic clinical situations and patient symptoms. Although video-assisted, problem-based learning methods combined with text have shown positive results in terms of improving comprehension and cognitive abilities, some studies have shown these methods to be inferior to text-assisted methods in terms of promoting deep critical thinking in medical students.

PURPOSE: This study was designed to assess the benefits in nursing education of video-assisted, problem-based learning using online multimedia technologies compared with text-assisted, problem-based learning using traditional face-to-face classes.

METHODS: A quasi-experimental, nonequivalent control group, preintervention-and-postintervention design was used. The experimental group (n = 31) received video-assisted, problem-based learning materials with multimedia technologies (video scenarios, Google Docs worksheets, Google Slides, Zoom cloud meetings, and an e-learning management system) and weekly online lectures (100 minutes) for 4 weeks. The control group (n = 35) received text-assisted, problem-based learning materials with traditional face-to-face classes and weekly lectures (100 minutes) for 4 weeks. The study data were analyzed using chi-square, Fisher's exact, and independent t tests as well as analysis of variance.

RESULTS: At posttest, learning motivation (t = 3.25, p = .002), academic self-efficacy (t = 2.41, p = .019), and self-directed learning (t = 3.08, p = .003) were significantly higher in the experimental group than in the control group.

CONCLUSIONS: Video-assisted, problem-based learning using multimedia technologies was shown to be effective in increasing learning motivation, academic self-efficacy, and self-directed learning in nursing students. These findings have implications for the development and planning of contactless classes in response to the coronavirus pandemic. Notably, no intergroup differences were found in terms of problem-solving skills. Future studies should include in-depth reviews and assessments of the difficulties faced in producing problem scenarios as well as the methods of instruction.}, } @article {pmid37165050, year = {2023}, author = {Liu, N and Guo, D and Song, Z and Zhong, S and Hu, R}, title = {BIM-based digital platform and risk management system for mountain tunnel construction.}, journal = {Scientific reports}, volume = {13}, number = {1}, pages = {7585}, pmid = {37165050}, issn = {2045-2322}, support = {2019M663648//the China Postdoctoral Science Foundation/ ; 2022JM-190//the Natural Science Basic Research Program of Shaanxi/ ; 52178393//the National Natural Science Foundation of China/ ; 2020TD-005//the Innovation Capability Support Plan of Shaanxi Province - Innovation Team/ ; 20JK0709//the Shaanxi Provincial Department of Education Project/ ; }, abstract = {During the construction of mountain tunnels, there are often various intricate and mutable potential hazards, the management and control of which are crucial to ensuring the safety of such construction. With the rapid advancement of engineering information technologies, including Building Information Modeling (BIM), the internet, big data, and cloud computing, dynamic management of mountain tunnel construction will inevitably become a prevailing trend. This paper proposes a new digital approach to realize the informatization and visualization of risk management in mountain tunnel construction, by combining monitoring measurement with advanced geological prediction based on BIM technology. The proposed approach suggests a BIM-based digital platform architecture for mountain tunnel construction, which comprises five layers: basic, model, data, application, and user. The integration of these five layers enables informatized risk management during the construction of mountain tunnels. In addition, a set of dynamic risk management systems, including risk monitoring, identification, and assessment, can be established based on the digital platform. The digital platform and dynamic risk management system proposed in this paper have certain advantages in the construction of mountain tunnels, providing a new and significant way for the management of safety risks in such construction projects.}, } @article {pmid37133929, year = {2023}, author = {Li, R and Shen, M and Liu, H and Bai, L and Zhang, L}, title = {Do Infrared Thermometers Hold Promise for an Effective Early Warning System for Emerging Respiratory Infectious Diseases?.}, journal = {JMIR formative research}, volume = {7}, number = {}, pages = {e42548}, pmid = {37133929}, issn = {2561-326X}, abstract = {BACKGROUND: Major respiratory infectious diseases, such as influenza, SARS-CoV, and SARS-CoV-2, have caused historic global pandemics with severe disease and economic burdens. Early warning and timely intervention are key to suppressing such outbreaks.

OBJECTIVE: We propose a theoretical framework for a community-based early warning system (EWS) that will proactively detect temperature abnormalities in the community based on a collective network of infrared thermometer-enabled smartphone devices.

METHODS: We developed a framework for a community-based EWS and demonstrated its operation with a schematic flowchart. We also highlight the potential feasibility of the EWS and possible obstacles.

RESULTS: Overall, the framework uses advanced artificial intelligence (AI) technology on cloud computing platforms to estimate the probability of an outbreak in a timely manner. It hinges on the detection of geospatial temperature abnormalities in the community based on mass data collection, cloud-based computing and analysis, decision-making, and feedback. The EWS may be feasible for implementation considering its public acceptance, technical practicality, and value for money. However, it is important that the proposed framework work in parallel or in combination with other early warning mechanisms due to a relatively long initial model training process.

CONCLUSIONS: The framework, if implemented, may provide an important tool to support decisions on the early prevention and control of respiratory diseases for health stakeholders.}, } @article {pmid37128501, year = {2023}, author = {Zhao, F and Peng, C and Xu, D and Liu, Y and Niu, K and Tang, H}, title = {Attribute-based multi-user collaborative searchable encryption in COVID-19.}, journal = {Computer communications}, volume = {205}, number = {}, pages = {118-126}, pmid = {37128501}, issn = {0140-3664}, abstract = {With the outbreak of COVID-19, the government has been forced to collect a large amount of detailed information about patients, including their private data, in order to effectively curb the epidemic of the disease. Searchable encryption is an essential technology for ciphertext retrieval in cloud computing environments, and many searchable encryption schemes use attributes to control users' search permissions and protect their data privacy. Existing attribute-based searchable encryption (ABSE) schemes can only handle the situation where one person's search permission satisfies the search policy, and do not support users obtaining search permission through collaboration. In this paper, we proposed a new attribute-based collaborative searchable encryption scheme in a multi-user setting (ABCSE-MU), which takes an access tree as the access policy and introduces translation nodes to implement collaborative search. Cooperation can only be reached at a translation node, and flexible search permissions are achieved on the premise of data security. The ABCSE-MU scheme solves the problem of a single user who has insufficient search permissions but still needs to search, making access policies more flexible. We use random blinding to ensure the confidentiality and security of the secret key, and further prove that our scheme is secure under the Decisional Bilinear Diffie-Hellman (DBDH) assumption. Security analysis further shows that the scheme can ensure the confidentiality of data under chosen-keyword attacks and resist collusion attacks.}, } @article {pmid37128419, year = {2022}, author = {Jalali, A and Huang, SS and Kochendorfer, KM}, title = {Cloud Computing Synthetic Syndromic Surveillance Systems: Opioid Epidemic in Illinois.}, journal = {AMIA ... Annual Symposium proceedings. AMIA Symposium}, volume = {2022}, number = {}, pages = {580-586}, pmid = {37128419}, issn = {1942-597X}, mesh = {Humans ; *Analgesics, Opioid/administration & dosage/adverse effects/poisoning ; *Cloud Computing ; *Drug Overdose/drug therapy/epidemiology ; *Emergency Medical Services ; *Opioid Epidemic/statistics & numerical data ; *Sentinel Surveillance ; Databases, Factual ; Chicago/epidemiology ; Prognosis ; Male ; Female ; Middle Aged ; }, abstract = {With an increasing number of overdose cases yearly, the city of Chicago is facing an opioid epidemic. Many of these overdose cases lead to 911 calls that necessitate timely response from our limited emergency medicine services. This paper demonstrates how data from these calls, along with synthetic and geospatial data, can help create a syndromic surveillance system to combat this opioid crisis. Chicago EMS data is obtained from the Illinois Department of Public Health with a database structure using the NEMSIS standard. This information is combined with information from the RTI U.S. Household Population database, before being transferred to an Azure Data Lake.
The data is then integrated with Azure Synapse before being refined in another data lake and filtered with ICD-10 codes. Finally, we moved the data to ArcGIS Enterprise to apply spatial statistics and geospatial analytics to create our surveillance system.}, } @article {pmid37123982, year = {2023}, author = {Gomis, MKS and Oladinrin, OT and Saini, M and Pathirage, C and Arif, M}, title = {A scientometric analysis of global scientific literature on learning resources in higher education.}, journal = {Heliyon}, volume = {9}, number = {4}, pages = {e15438}, pmid = {37123982}, issn = {2405-8440}, abstract = {There is a significant increase in the literature on learning resources in Higher Education (HE) but very limited evidence of studies that have taken a global overview of the context, range, and emerging trends from the previous research. This study aims to conduct a scientometric analysis of research articles to accommodate a global overview and research trends under the theme of learning resources in HE. 4489 scientific articles were obtained as the dataset from the Web of Science database between 1970 and 2022. Network maps and critical data were obtained by conducting co-authorship analysis for authors, organisations and countries and co-occurrence analysis for keywords using the VOSviewer software. The study revealed that the USA had a significant research input, and Salamin, N. from the University of Lausanne was recognised as the most frequently published author. The University of Illinois, USA, has the highest contribution to research articles, and the most popular research hotspots and trends were e-learning, Education, Academic libraries, Learning resources, and Cloud computing. However, the most critical finding is that real collaboration is needed within the research theme, and the study suggests ways to improve collaborations to enhance learning resources in HE. This study may be the first to conduct a scientometric analysis of Learning Resources in Higher Education. This study offers valuable insight to academics, academic institutions, researchers, policymakers and pedagogical statutory bodies to understand the current context of learning resources in HE and to further develop research, collaborations and policies by considering its critical findings.}, } @article {pmid37122827, year = {2023}, author = {Zhang, Y and Dong, H}, title = {Criminal law regulation of cyber fraud crimes-from the perspective of citizens' personal information protection in the era of edge computing.}, journal = {Journal of cloud computing (Heidelberg, Germany)}, volume = {12}, number = {1}, pages = {64}, pmid = {37122827}, issn = {2192-113X}, abstract = {Currently, cloud computing provides users all over the globe with Information and Communication Technology facilities that are utility-oriented. This technology is trying to drive the development of data center design by designing and building data centers as networks of cloud machines, enabling users to access and run applications from any part of the globe. Cloud computing provides considerable benefits to organizations by providing rapid and adaptable ICT software and hardware systems, allowing them to concentrate on creating innovative business values for the facilities they provide.
The right to privacy of big data has acquired new definitions with the continued advancement of cloud computing, and the techniques available to protect citizens' personal information under administrative law have grown in number. Against this background, internet fraud is a new type of crime that has emerged over time and is based on network technology. This paper analyzed and studied China's internet fraud governance capabilities, and made a comprehensive evaluation of them using cloud computing technology and the Analytic Hierarchy Process (AHP). This paper discussed personal information security and the improvement of criminal responsibility from the perspective of citizens' information security and designed and analyzed cases. In addition, this paper also analyzed and studied the ability of network fraud governance in the era of cloud computing, carrying out a comprehensive evaluation using the fuzzy comprehensive evaluation method. A questionnaire was used to survey 100 residents in district X of city Z and district Y of the suburban area. Of the 100 people, almost all (99%) had received scam calls or text messages, and 8 had been defrauded. More than 59.00% of respondents expressed dissatisfaction with the government's handling of Internet fraud. Therefore, in the process of combating Internet fraud, the government still needs to step up its efforts.}, } @article {pmid37115834, year = {2023}, author = {Lin, BS and Peng, CW and Lee, IJ and Hsu, HK and Lin, BS}, title = {System Based on Artificial Intelligence Edge Computing for Detecting Bedside Falls and Sleep Posture.}, journal = {IEEE journal of biomedical and health informatics}, volume = {27}, number = {7}, pages = {3549-3558}, doi = {10.1109/JBHI.2023.3271463}, pmid = {37115834}, issn = {2168-2208}, mesh = {Humans ; Aged ; *Artificial Intelligence ; *Neural Networks, Computer ; Algorithms ; Posture ; Sleep ; Cloud Computing ; }, abstract = {Bedside falls and pressure ulcers are crucial issues in geriatric care. Although many bedside monitoring systems have been proposed, they are limited by the computational complexity of their algorithms. Moreover, most of the data collected by the sensors of these systems must be transmitted to a back-end server for calculation. With the increasing demand for the Internet of Things, the aforementioned systems face problems such as higher bandwidth costs and overloaded server computing. To reduce the server workload, certain computing tasks must be offloaded from cloud servers to edge computing platforms. In this study, a bedside monitoring system based on neuromorphic computing hardware was developed to detect bedside falls and sleeping posture. The artificial intelligence neural network executed on the back-end server was simplified and used on an edge computing platform. An integer 8-bit-precision neural network model was deployed on the edge computing platform to process the thermal image captured by the thermopile array sensing element to conduct sleep posture classification and bed position detection. The bounding box of the bed was then converted into features used to correct the posture classification. In an experimental evaluation, the accuracy rate, inference speed, and power consumption of the developed system were 94.56%, 5.28 frames per second, and 1.5 W, respectively.
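As a generic illustration of preparing such an 8-bit integer model (this is not the paper's network or toolchain), the sketch below applies PyTorch dynamic quantization to a tiny classifier over flattened thermal frames; the layer sizes, class count, and 32x24 frame shape are assumptions:

# Generic sketch: int8 dynamic quantization of a tiny posture classifier.
# Architecture and frame shape are illustrative, not the paper's model.
import torch
import torch.nn as nn

class PostureClassifier(nn.Module):
    """Toy classifier for flattened thermal frames (e.g., a 32x24 thermopile array)."""
    def __init__(self, n_pixels: int = 32 * 24, n_classes: int = 3):
        super().__init__()
        self.net = nn.Sequential(
            nn.Linear(n_pixels, 128), nn.ReLU(),
            nn.Linear(128, n_classes),
        )

    def forward(self, x):
        return self.net(x)

model = PostureClassifier().eval()
# Replace Linear layers with int8 counterparts; activations are quantized
# on the fly at inference time.
quantized = torch.quantization.quantize_dynamic(model, {nn.Linear}, dtype=torch.qint8)

frame = torch.rand(1, 32 * 24)            # stand-in for one thermal frame
print(quantized(frame).argmax(dim=1))     # predicted posture class index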
All calculations of the developed system are conducted on an edge computing platform; only fall events are transmitted to the back-end server through Wi-Fi, which protects user privacy.}, } @article {pmid37112575, year = {2023}, author = {Gabhane, LR and Kanidarapu, N}, title = {Environmental Risk Assessment Using Neural Network in Liquefied Petroleum Gas Terminal.}, journal = {Toxics}, volume = {11}, number = {4}, pages = {}, pmid = {37112575}, issn = {2305-6304}, abstract = {The accidental release of toxic gases leads to fire, explosion, and acute toxicity, and may result in severe problems for people and the environment. The risk analysis of hazardous chemicals using consequence modelling is essential to improve the process reliability and safety of the liquefied petroleum gas (LPG) terminal. Previous researchers focused on single-mode failure for risk assessment. No study exists on LPG plant multimode risk analysis and threat zone prediction using machine learning. This study aims to evaluate the fire and explosion hazard potential of one of Asia's biggest LPG terminals in India. Areal Locations of Hazardous Atmospheres (ALOHA) software simulations are used to generate threat zones for the worst scenarios. The same dataset is used to develop the artificial neural network (ANN) prediction model. The threats of flammable vapour cloud, thermal radiation from fire, and overpressure blast waves are estimated in two different weather conditions. A total of 14 LPG leak scenarios involving a 19 kg capacity cylinder, 21 tons capacity tank truck, 600 tons capacity mounded bullet, and 1350 tons capacity Horton sphere in the terminal are considered. Amongst all scenarios, the catastrophic rupture of the Horton sphere of 1350 MT capacity presented the most significant risk to life safety. Thermal flux of 37.5 kW/m[2] from flames will damage nearby structures and equipment and spread fire by the domino effect. A novel soft computing technique called a threat and risk analysis-based ANN model has been developed to predict threat zone distances for LPG leaks. Based on the significance of incidents in the LPG terminal, 160 attributes were collected for the ANN modelling. The developed ANN model predicted the threat zone distance with an R[2] of 0.9958 and an MSE of 202.9061 in testing. These results demonstrate the reliability of the proposed framework for safety distance prediction. LPG plant authorities can adopt this model to assess safety distances from hazardous chemical explosions based on atmospheric conditions forecast by the weather department.}, } @article {pmid37112349, year = {2023}, author = {Čilić, I and Krivić, P and Podnar Žarko, I and Kušek, M}, title = {Performance Evaluation of Container Orchestration Tools in Edge Computing Environments.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {8}, pages = {}, pmid = {37112349}, issn = {1424-8220}, support = {IP-2019-04-1986//Croatian Science Foundation/ ; }, abstract = {Edge computing is a viable approach to improve service delivery and performance parameters by extending the cloud with resources placed closer to a given service environment. Numerous research papers in the literature have already identified the key benefits of this architectural approach. However, most results are based on simulations performed in closed network environments.
This paper aims to analyze the existing implementations of processing environments containing edge resources, taking into account the targeted quality of service (QoS) parameters and the utilized orchestration platforms. Based on this analysis, the most popular edge orchestration platforms are evaluated in terms of the workflow that allows the inclusion of remote devices in the processing environment and their ability to adapt the logic of the scheduling algorithms to improve the targeted QoS attributes. The experimental results compare the performance of the platforms and show the current state of their readiness for edge computing in real network and execution environments. These findings suggest that Kubernetes and its distributions have the potential to provide effective scheduling across the resources on the network's edge. However, some challenges still have to be addressed to completely adapt these tools for such a dynamic and distributed execution environment as edge computing implies.}, } @article {pmid37112296, year = {2023}, author = {Kim, T and Yoo, SE and Kim, Y}, title = {Edge/Fog Computing Technologies for IoT Infrastructure II.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {8}, pages = {}, pmid = {37112296}, issn = {1424-8220}, support = {2020R1F1A1048179//National Research Foundation of Korea (NRF) grant funded by the Korea government (MSIT)/ ; Research Grant of Jeonju University in 2022//Research Grant of Jeonju University in 2022/ ; }, abstract = {The prevalence of smart devices and cloud computing has led to an explosion in the amount of data generated by IoT devices [...].}, } @article {pmid37112224, year = {2023}, author = {Zhu, M and Gao, S and Tu, G and Chen, D}, title = {Multi-Access Edge Computing (MEC) Based on MIMO: A Survey.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {8}, pages = {}, pmid = {37112224}, issn = {1424-8220}, support = {292021000242//Research Funds for the Central Universities/ ; 2017YFB0403604//National Key Research and Development Program of China/ ; 61571416//National Natural Science Foundation of China/ ; 61072045//National Natural Science Foundation of China/ ; 61032006//National Natural Science Foundation of China/ ; }, abstract = {With the rapid development of wireless communication technology and the emergence of intelligent applications, higher requirements have been put forward for data communication and computing capacity. Multi-access edge computing (MEC) can handle users' highly demanding applications by sinking the services and computing capabilities of the cloud to the edge of the cell. Meanwhile, multiple-input multiple-output (MIMO) technology based on large-scale antenna arrays can achieve an order-of-magnitude improvement in system capacity. The introduction of MIMO into MEC takes full advantage of the energy and spectral efficiency of MIMO technology, providing a new computing paradigm for time-sensitive applications. In parallel, it can accommodate more users and cope with the inevitable trend of continuous data traffic explosion. In this paper, state-of-the-art research in this field is investigated, summarized, and analyzed. Specifically, we first summarize a multi-base station cooperative mMIMO-MEC model that can easily be expanded to adapt to different MIMO-MEC application scenarios.
Subsequently, we comprehensively analyze the current works, compare them to each other and summarize them, mainly from four aspects: research scenarios, application scenarios, evaluation indicators and research issues, and research algorithms. Finally, some open research challenges are identified and discussed, and these indicate the direction for future research on MIMO-MEC.}, } @article {pmid37112221, year = {2023}, author = {Oladimeji, D and Gupta, K and Kose, NA and Gundogan, K and Ge, L and Liang, F}, title = {Smart Transportation: An Overview of Technologies and Applications.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {8}, pages = {}, pmid = {37112221}, issn = {1424-8220}, abstract = {As technology continues to evolve, our society is becoming enriched with more intelligent devices that help us perform our daily activities more efficiently and effectively. One of the most significant technological advancements of our time is the Internet of Things (IoT), which interconnects various smart devices (such as smart mobiles, intelligent refrigerators, smartwatches, smart fire alarms, smart door locks, and many more), allowing them to communicate with each other and exchange data seamlessly. We now use IoT technology to carry out our daily activities, for example, transportation. In particular, the field of smart transportation has intrigued researchers due to its potential to revolutionize the way we move people and goods. IoT provides drivers in a smart city with many benefits, including traffic management, improved logistics, efficient parking systems, and enhanced safety measures. Smart transportation is the integration of all these benefits into applications for transportation systems. However, as a way of further improving the benefits provided by smart transportation, other technologies have been explored, such as machine learning, big data, and distributed ledgers. Some examples of their application are the optimization of routes, parking, street lighting, accident prevention, detection of abnormal traffic conditions, and maintenance of roads. In this paper, we aim to provide a detailed understanding of the developments in the applications mentioned earlier and examine current research that bases its applications on these sectors. We aim to conduct a self-contained review of the different technologies used in smart transportation today and their respective challenges. Our methodology encompassed identifying and screening articles on smart transportation technologies and their applications. To identify articles addressing our topic of review, we searched for articles in four significant databases: IEEE Xplore, ACM Digital Library, Science Direct, and Springer. Consequently, we examined the communication mechanisms, architectures, and frameworks that enable these smart transportation applications and systems. We also explored the communication protocols enabling smart transportation, including Wi-Fi, Bluetooth, and cellular networks, and how they contribute to seamless data exchange. We delved into the different architectures and frameworks used in smart transportation, including cloud computing, edge computing, and fog computing. Lastly, we outlined current challenges in the smart transportation field and suggested potential future research directions.
These include data privacy and security issues, network scalability, and interoperability between different IoT devices.}, } @article {pmid37100543, year = {2023}, author = {Alberto, IRI and Alberto, NRI and Ghosh, AK and Jain, B and Jayakumar, S and Martinez-Martin, N and McCague, N and Moukheiber, D and Moukheiber, L and Moukheiber, M and Moukheiber, S and Yaghy, A and Zhang, A and Celi, LA}, title = {The impact of commercial health datasets on medical research and health-care algorithms.}, journal = {The Lancet. Digital health}, volume = {5}, number = {5}, pages = {e288-e294}, pmid = {37100543}, issn = {2589-7500}, support = {R01 EB017205/EB/NIBIB NIH HHS/United States ; R56 EB017205/EB/NIBIB NIH HHS/United States ; }, mesh = {Humans ; *Algorithms ; *Biomedical Research ; Privacy ; Reproducibility of Results ; *Datasets as Topic/economics/ethics/trends ; Consumer Health Information/economics/ethics ; }, abstract = {As the health-care industry emerges into a new era of digital health driven by cloud data storage, distributed computing, and machine learning, health-care data have become a premium commodity with value for private and public entities. Current frameworks of health data collection and distribution, whether from industry, academia, or government institutions, are imperfect and do not allow researchers to leverage the full potential of downstream analytical efforts. In this Health Policy paper, we review the current landscape of commercial health data vendors, with special emphasis on the sources of their data, challenges associated with data reproducibility and generalisability, and ethical considerations for data vending. We argue for sustainable approaches to curating open-source health data to enable global populations to be included in the biomedical research community. However, to fully implement these approaches, key stakeholders should come together to make health-care datasets increasingly accessible, inclusive, and representative, while balancing the privacy and rights of individuals whose data are being collected.}, } @article {pmid37093388, year = {2023}, author = {Mohinuddin, S and Sengupta, S and Sarkar, B and Saha, UD and Islam, A and Islam, ARMT and Hossain, ZM and Mahammad, S and Ahamed, T and Mondal, R and Zhang, W and Basra, A}, title = {Assessing lake water quality during COVID-19 era using geospatial techniques and artificial neural network model.}, journal = {Environmental science and pollution research international}, volume = {30}, number = {24}, pages = {65848-65864}, pmid = {37093388}, issn = {1614-7499}, mesh = {Humans ; *Water Quality ; Lakes ; *COVID-19 ; Environmental Monitoring/methods ; Communicable Disease Control ; Chlorophyll/analysis ; Neural Networks, Computer ; Phosphorus/analysis ; }, abstract = {The present study evaluates the impact of the COVID-19 lockdown on the water quality of a tropical lake (East Kolkata Wetland or EKW, India) along with seasonal change using Landsat 8 and 9 images of the Google Earth Engine (GEE) cloud computing platform. The research focuses on detecting, monitoring, and predicting water quality in the EKW region using eight parameters, namely the normalized suspended material index (NSMI), suspended particulate matter (SPM), total phosphorus (TP), electrical conductivity (EC), chlorophyll-α, floating algae index (FAI), turbidity, and Secchi disk depth (SDD), together with two water quality indices, the Carlson trophic state index (CTSI) and the entropy-weighted water quality index (EWQI).
The results demonstrate that SPM, turbidity, EC, TP, and SDD improved while the FAI and chlorophyll-α increased during the lockdown period due to the stagnation of water as well as a reduction in industrial and anthropogenic pollution. Moreover, the prediction of EWQI using an artificial neural network indicates that the overall water quality will improve more if the lockdown period is sustained for another 3 years. The outcomes of the study will help the stakeholders develop effective regulations and strategies for the timely restoration of lake water quality.}, } @article {pmid37090033, year = {2023}, author = {Ji, JL and Demšar, J and Fonteneau, C and Tamayo, Z and Pan, L and Kraljič, A and Matkovič, A and Purg, N and Helmer, M and Warrington, S and Winkler, A and Zerbi, V and Coalson, TS and Glasser, MF and Harms, MP and Sotiropoulos, SN and Murray, JD and Anticevic, A and Repovš, G}, title = {QuNex-An integrative platform for reproducible neuroimaging analytics.}, journal = {Frontiers in neuroinformatics}, volume = {17}, number = {}, pages = {1104508}, pmid = {37090033}, issn = {1662-5196}, support = {R01 MH060974/MH/NIMH NIH HHS/United States ; U19 AG073585/AG/NIA NIH HHS/United States ; }, abstract = {INTRODUCTION: Neuroimaging technology has experienced explosive growth and transformed the study of neural mechanisms across health and disease. However, given the diversity of sophisticated tools for handling neuroimaging data, the field faces challenges in method integration, particularly across multiple modalities and species. Specifically, researchers often have to rely on siloed approaches which limit reproducibility, with idiosyncratic data organization and limited software interoperability.

METHODS: To address these challenges, we have developed Quantitative Neuroimaging Environment & Toolbox (QuNex), a platform for consistent end-to-end processing and analytics. QuNex provides several novel functionalities for neuroimaging analyses, including a "turnkey" command for the reproducible deployment of custom workflows, from onboarding raw data to generating analytic features.
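To make the "turnkey" idea concrete, here is a minimal Python sketch of that style of orchestration: a single call runs an ordered set of processing steps over a raw session and records parameters and timings as a provenance log. The step names, their bodies, and the file layout are hypothetical illustrations of the pattern, not QuNex's actual API.

```python
# Sketch of a "turnkey"-style wrapper: run ordered steps over one session
# and keep a provenance log. Step names and paths are hypothetical.
import json
import time
from pathlib import Path

def onboard(session_dir: Path) -> dict:
    # Hypothetical step: index raw files into a standard layout.
    return {"files": sorted(p.name for p in session_dir.glob("*"))}

def preprocess(state: dict) -> dict:
    # Hypothetical step: stand-in for modality-specific preprocessing.
    state["preprocessed"] = True
    return state

def extract_features(state: dict) -> dict:
    # Hypothetical step: derive analytic features from processed data.
    state["features"] = {"n_inputs": len(state["files"])}
    return state

def run_turnkey(session_dir: Path, log_path: Path) -> dict:
    """Run all steps in order and write a JSON provenance log."""
    state, log = {}, []
    for step in (onboard, preprocess, extract_features):
        t0 = time.time()
        state = step(session_dir) if step is onboard else step(state)
        log.append({"step": step.__name__,
                    "seconds": round(time.time() - t0, 3)})
    log_path.write_text(json.dumps(log, indent=2))
    return state
```

Keeping every run behind one entry point in this way is what makes large deployments reproducible and easy to fan out across an HPC scheduler or a cloud batch service.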

RESULTS: The platform enables interoperable integration of multi-modal, community-developed neuroimaging software through an extension framework with a software development kit (SDK) for seamless integration of community tools. Critically, it supports high-throughput, parallel processing in high-performance compute environments, either locally or in the cloud. Notably, QuNex has successfully processed over 10,000 scans across neuroimaging consortia, including multiple clinical datasets. Moreover, QuNex enables integration of human and non-human workflows via a cohesive translational platform.

DISCUSSION: Collectively, this effort stands to significantly impact neuroimaging method integration across acquisition approaches, pipelines, datasets, computational environments, and species. Building on this platform will enable more rapid, scalable, and reproducible impact of neuroimaging technology across health and disease.}, } @article {pmid37089762, year = {2023}, author = {Pongpech, WA}, title = {A Distributed Data Mesh Paradigm for an Event-based Smart Communities Monitoring Product.}, journal = {Procedia computer science}, volume = {220}, number = {}, pages = {584-591}, pmid = {37089762}, issn = {1877-0509}, abstract = {The recent pandemic events in Thailand, Covid-19 in 2020, demonstrated the need for an event-based smart monitoring system. While a distributed multi-level architecture has emerged as an architecture of choice for a larger-scale smart event-based system that requires better latency, security, scalability, and reliability, a recently introduced data mesh paradigm can add a few additional benefits. The paradigm enables each district to become an event-based smart monitoring mesh and handle its analytics and monitoring workload. Districts can form a set of domains in a network of event-based smart community monitoring systems and provide data products for others during a crisis. This paper presents a distributed data mesh paradigm for an event-based smart monitoring product in a given community with predefined domains. The paper presents smart monitoring as a data product between domains. Key considerations for designing an event-based smart monitoring data product are given. The author introduces three possible domains necessary for creating a smart monitoring system in each community. Each domain creates a data product for a given domain and shares data between domains. Finally, a three-layer analytics architecture for a smart monitoring product in each domain and a use case is presented.}, } @article {pmid37087660, year = {2023}, author = {Zhao, ZK and Tian, YS and Weng, XX and Li, HW and Sun, WY}, title = {Temporal and spatial variation characteristics of surface water area in the Yellow River Basin from 1986 to 2021.}, journal = {Ying yong sheng tai xue bao = The journal of applied ecology}, volume = {34}, number = {3}, pages = {761-769}, doi = {10.13287/j.1001-9332.202303.021}, pmid = {37087660}, issn = {1001-9332}, mesh = {Humans ; *Water ; *Environmental Monitoring ; Rivers ; Climate Change ; Algorithms ; China ; }, abstract = {The Yellow River Basin is short of water resources. The dynamic monitoring of surface water area is helpful to clarify the distribution and change trend of water resources in this area. It is of great scientific significance to deeply understand the impacts of climate change and human activities on water resources and ensure the ecological security of the basin. Based on the Google Earth Engine (GEE) cloud platform, we analyzed the spatial variations of surface water area in the Yellow River Basin from 1986 to 2021 by using the mixed index algorithm, and revealed the driving factors of surface water area change in the Yellow River Basin. The results showed that the overall recognition accuracy of the water extraction algorithm based on the mixed index was 97.5%. Compared with available water data products, the proposed algorithm can guarantee the integrity of the whole water area to a certain extent.
The surface water area in the upper, middle, and lower reaches of the Yellow River Basin was 71.7%, 18.4%, and 9.9% of the total surface water area, respectively. From 1986 to 2021, the surface water area of the basin showed an overall upward trend, with a total increase of 3163.6 km². The surface water area of the upper, middle, and lower reaches increased by 72.0%, 22.4%, and 5.6%, respectively. Increased precipitation was the main reason for the expansion of the water area, with a contribution of 55%. Vegetation restoration and the construction of water conservancy projects also increased the water area of the basin. The intensification of human water extraction activity reduced the water area of the basin.}, } @article {pmid37085488, year = {2023}, author = {Harle, N and Shtanko, O and Movassagh, R}, title = {Observing and braiding topological Majorana modes on programmable quantum simulators.}, journal = {Nature communications}, volume = {14}, number = {1}, pages = {2286}, pmid = {37085488}, issn = {2041-1723}, abstract = {Electrons are indivisible elementary particles, yet paradoxically a collection of them can act as a fraction of a single electron, exhibiting exotic and useful properties. One such collective excitation, known as a topological Majorana mode, is naturally stable against perturbations, such as unwanted local noise, and can thereby robustly store quantum information. As such, Majorana modes serve as the basic primitive of topological quantum computing, providing resilience to errors. However, their demonstration on quantum hardware has remained elusive. Here, we demonstrate a verifiable identification and braiding of topological Majorana modes using a superconducting quantum processor as a quantum simulator. By simulating fermions on a one-dimensional lattice subject to a periodic drive, we confirm the existence of Majorana modes localized at the edges, and distinguish them from other trivial modes. To simulate a basic logical operation of topological quantum computing known as braiding, we propose a non-adiabatic technique, whose implementation reveals correct braiding statistics in our experiments. This work could further be used to study topological models of matter using circuit-based simulations, and shows that long-sought quantum phenomena can be realized by anyone in cloud-run quantum simulations, thereby accelerating fundamental discoveries in quantum science and technology.}, } @article {pmid37079367, year = {2023}, author = {Afshar, M and Adelaine, S and Resnik, F and Mundt, MP and Long, J and Leaf, M and Ampian, T and Wills, GJ and Schnapp, B and Chao, M and Brown, R and Joyce, C and Sharma, B and Dligach, D and Burnside, ES and Mahoney, J and Churpek, MM and Patterson, BW and Liao, F}, title = {Deployment of Real-time Natural Language Processing and Deep Learning Clinical Decision Support in the Electronic Health Record: Pipeline Implementation for an Opioid Misuse Screener in Hospitalized Adults.}, journal = {JMIR medical informatics}, volume = {11}, number = {}, pages = {e44977}, pmid = {37079367}, issn = {2291-9694}, support = {R01 DA051464/DA/NIDA NIH HHS/United States ; R01 DK126933/DK/NIDDK NIH HHS/United States ; R01 LM010090/LM/NLM NIH HHS/United States ; UL1 TR002373/TR/NCATS NIH HHS/United States ; }, abstract = {BACKGROUND: The clinical narrative in electronic health records (EHRs) carries valuable information for predictive analytics; however, its free-text form is difficult to mine and analyze for clinical decision support (CDS).
Large-scale clinical natural language processing (NLP) pipelines have focused on data warehouse applications for retrospective research efforts. There remains a paucity of evidence for implementing NLP pipelines at the bedside for health care delivery.

OBJECTIVE: We aimed to detail a hospital-wide, operational pipeline to implement a real-time NLP-driven CDS tool and describe a protocol for an implementation framework with a user-centered design of the CDS tool.

METHODS: The pipeline integrated a previously trained open-source convolutional neural network model for screening opioid misuse that leveraged EHR notes mapped to standardized medical vocabularies in the Unified Medical Language System. A sample of 100 adult encounters was reviewed by a physician informaticist for silent testing of the deep learning algorithm before deployment. An end user interview survey was developed to examine the user acceptability of a best practice alert (BPA) to provide the screening results with recommendations. The planned implementation also included a human-centered design with user feedback on the BPA, an implementation framework with a cost-effectiveness analysis, and a noninferiority patient outcome analysis plan.

RESULTS: The pipeline was a reproducible workflow with a shared pseudocode for a cloud service to ingest, process, and store clinical notes as Health Level 7 messages from a major EHR vendor in an elastic cloud computing environment. Feature engineering of the notes used an open-source NLP engine, and the features were fed into the deep learning algorithm, with the results returned as a BPA in the EHR. On-site silent testing of the deep learning algorithm demonstrated a sensitivity of 93% (95% CI 66%-99%) and specificity of 92% (95% CI 84%-96%), similar to published validation studies. Before deployment, approvals were received across hospital committees for inpatient operations. Five interviews were conducted; they informed the development of an educational flyer and further modified the BPA to exclude certain patients and allow the refusal of recommendations. The longest delay in pipeline development was because of cybersecurity approvals, especially because of the exchange of protected health information between the Microsoft (Microsoft Corp) and Epic (Epic Systems Corp) cloud vendors. In silent testing, the resultant pipeline provided a BPA to the bedside within minutes of a provider entering a note in the EHR.
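As a worked illustration of the silent-testing arithmetic reported above, the sketch below computes sensitivity and specificity with 95% Wilson score intervals from a confusion matrix. The counts are made-up placeholders for a 100-encounter review, not the study's data.

```python
# Sensitivity/specificity with 95% Wilson score intervals from a
# confusion matrix; counts below are illustrative placeholders.
from math import sqrt

def wilson_ci(successes: int, n: int, z: float = 1.96) -> tuple:
    """Wilson score interval for a binomial proportion."""
    p = successes / n
    denom = 1 + z ** 2 / n
    center = (p + z ** 2 / (2 * n)) / denom
    half = z * sqrt(p * (1 - p) / n + z ** 2 / (4 * n ** 2)) / denom
    return (center - half, center + half)

def screen_metrics(tp: int, fn: int, tn: int, fp: int) -> dict:
    return {
        "sensitivity": (tp / (tp + fn), wilson_ci(tp, tp + fn)),
        "specificity": (tn / (tn + fp), wilson_ci(tn, tn + fp)),
    }

# Hypothetical 100-encounter review: 14 true positives, 1 false negative,
# 78 true negatives, 7 false positives.
print(screen_metrics(tp=14, fn=1, tn=78, fp=7))
```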

CONCLUSIONS: The components of the real-time NLP pipeline were detailed with open-source tools and pseudocode for other health systems to benchmark. The deployment of medical artificial intelligence systems in routine clinical care presents an important yet unfulfilled opportunity, and our protocol aimed to close the gap in the implementation of artificial intelligence-driven CDS.

TRIAL REGISTRATION: ClinicalTrials.gov NCT05745480; https://www.clinicaltrials.gov/ct2/show/NCT05745480.}, } @article {pmid37079353, year = {2023}, author = {Florensa, D and Mateo-Fornes, J and Lopez Sorribes, S and Torres Tuca, A and Solsona, F and Godoy, P}, title = {Exploring Cancer Incidence, Risk Factors, and Mortality in the Lleida Region: Interactive, Open-source R Shiny Application for Cancer Data Analysis.}, journal = {JMIR cancer}, volume = {9}, number = {}, pages = {e44695}, pmid = {37079353}, issn = {2369-1999}, abstract = {BACKGROUND: The cancer incidence rate is essential to public health surveillance. The analysis of this information allows authorities to know the cancer situation in their regions, especially to determine cancer patterns, monitor cancer trends, and help prioritize the allocation of health resources.
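To make the surveillance arithmetic behind such registries concrete, here is a minimal Python sketch of crude and directly age-standardized incidence rates per 100,000 person-years. All counts, populations, and standard weights are illustrative placeholders, not registry data.

```python
# Crude and directly age-standardized incidence rates per 100,000.
def crude_rate(cases: int, population: int) -> float:
    return 1e5 * cases / population

def age_standardized_rate(cases_by_age, pop_by_age, std_weights) -> float:
    # Direct standardization: weight each age-specific rate by a
    # reference ("standard") population distribution.
    total_w = sum(std_weights)
    return 1e5 * sum(
        w * (c / p) for c, p, w in zip(cases_by_age, pop_by_age, std_weights)
    ) / total_w

cases = [2, 15, 60]               # cases per age band (toy data)
pops = [50_000, 40_000, 20_000]   # person-years at risk per band
weights = [0.5, 0.3, 0.2]         # standard population shares
print(crude_rate(sum(cases), sum(pops)))            # 70.0 per 100,000
print(age_standardized_rate(cases, pops, weights))  # 73.25 per 100,000
```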

OBJECTIVE: This study aimed to present the design and implementation of an R Shiny application to assist cancer registries in conducting rapid descriptive and predictive analytics in a user-friendly, intuitive, portable, and scalable way. Moreover, we wanted to describe the design and implementation road map to inspire other population registries to exploit their data sets and develop similar tools and models.

METHODS: The first step was to consolidate the data into the population registry cancer database. These data were cross-validated by ASEDAT software, checked later, and reviewed by experts. Next, we developed an online tool under the R Shiny framework to visualize the data and generate reports to assist decision-making. Currently, the application can generate descriptive analytics using population variables, such as age, sex, and cancer type; cancer incidence in region-level geographical heat maps; line plots to visualize temporal trends; and typical risk factor plots. The application also showed descriptive plots about cancer mortality in the Lleida region. This web platform was built as a microservices cloud platform. The web back end consists of an application programming interface and a database, implemented with NodeJS and MongoDB, respectively. All these parts were encapsulated and deployed with Docker and Docker Compose.

RESULTS: The results provide a successful case study in which the tool was applied to the cancer registry of the Lleida region. The study illustrates how researchers and cancer registries can use the application to analyze cancer databases. Furthermore, the results highlight the analytics related to risk factors, second tumors, and cancer mortality. The application shows the incidence and evolution of each cancer during a specific period by gender, age group, and cancer location, among other functionalities. The risk factors view revealed that approximately 60% of cancer patients presented with excess weight at diagnosis. Regarding mortality, the application showed that lung cancer registered the highest number of deaths for both genders. Breast cancer was the most lethal cancer in women. Finally, a customization guide was included as a result of this implementation to deploy the architecture presented.

CONCLUSIONS: This paper aimed to document a successful methodology for exploiting the data in population cancer registries and propose guidelines for other similar registries to develop similar tools. We intend to inspire other entities to build an application that can help decision-making and make data more accessible and transparent for the community of users.}, } @article {pmid37073281, year = {2023}, author = {Touckia, JK}, title = {Integrating the digital twin concept into the evaluation of reconfigurable manufacturing systems (RMS): literature review and research trend.}, journal = {The International journal, advanced manufacturing technology}, volume = {126}, number = {3-4}, pages = {875-889}, pmid = {37073281}, issn = {0268-3768}, abstract = {With the rapid advent of new information technologies (Big Data analytics, cyber-physical systems, such as IoT, cloud computing and artificial intelligence), digital twins are being used more and more in smart manufacturing. Although their use in industry has attracted the attention of many practitioners and researchers, there is still a need for an integrated and comprehensive digital twin framework for reconfigurable manufacturing systems. To close this research gap, we present evidence from a systematic literature review, including 76 papers from high-quality journals. This paper presents the current research trends on evaluation and the digital twin in reconfigurable manufacturing systems, highlighting application areas and key methodologies and tools. The originality of this paper lies in its proposal of interesting avenues for future research on the integration of the digital twin in the evaluation of RMS. The benefits of digital twins are multiple, such as the evaluation of the current and future capabilities of an RMS during its life cycle, early discovery of system performance deficiencies, and production optimization. The idea is to implement a digital twin that links the virtual and physical environments.
Finally, important issues and emerging trends in the literature are highlighted to encourage researchers and practitioners to develop studies in this area that are strongly related to the Industry 4.0 environment.}, } @article {pmid37066421, year = {2023}, author = {Hitz, BC and Jin-Wook, L and Jolanki, O and Kagda, MS and Graham, K and Sud, P and Gabdank, I and Strattan, JS and Sloan, CA and Dreszer, T and Rowe, LD and Podduturi, NR and Malladi, VS and Chan, ET and Davidson, JM and Ho, M and Miyasato, S and Simison, M and Tanaka, F and Luo, Y and Whaling, I and Hong, EL and Lee, BT and Sandstrom, R and Rynes, E and Nelson, J and Nishida, A and Ingersoll, A and Buckley, M and Frerker, M and Kim, DS and Boley, N and Trout, D and Dobin, A and Rahmanian, S and Wyman, D and Balderrama-Gutierrez, G and Reese, F and Durand, NC and Dudchenko, O and Weisz, D and Rao, SSP and Blackburn, A and Gkountaroulis, D and Sadr, M and Olshansky, M and Eliaz, Y and Nguyen, D and Bochkov, I and Shamim, MS and Mahajan, R and Aiden, E and Gingeras, T and Heath, S and Hirst, M and Kent, WJ and Kundaje, A and Mortazavi, A and Wold, B and Cherry, JM}, title = {The ENCODE Uniform Analysis Pipelines.}, journal = {bioRxiv : the preprint server for biology}, volume = {}, number = {}, pages = {}, pmid = {37066421}, issn = {2692-8205}, support = {R01 HG009318/HG/NHGRI NIH HHS/United States ; U24 HG009397/HG/NHGRI NIH HHS/United States ; UM1 HG009375/HG/NHGRI NIH HHS/United States ; }, abstract = {The Encyclopedia of DNA elements (ENCODE) project is a collaborative effort to create a comprehensive catalog of functional elements in the human genome. The current database comprises more than 19,000 functional genomics experiments across more than 1000 cell lines and tissues using a wide array of experimental techniques to study the chromatin structure, regulatory and transcriptional landscape of the Homo sapiens and Mus musculus genomes. All experimental data, metadata, and associated computational analyses created by the ENCODE consortium are submitted to the Data Coordination Center (DCC) for validation, tracking, storage, and distribution to community resources and the scientific community. The ENCODE project has engineered and distributed uniform processing pipelines in order to promote data provenance and reproducibility as well as allow interoperability between genomic resources and other consortia. All data files, reference genome versions, software versions, and parameters used by the pipelines are captured and available via the ENCODE Portal. The pipeline code, developed using Docker and Workflow Description Language (WDL; https://openwdl.org/), is publicly available on GitHub, with images available on Dockerhub (https://hub.docker.com), enabling access for a diverse range of biomedical researchers. ENCODE pipelines maintained and used by the DCC can be installed to run on personal computers, local HPC clusters, or in cloud computing environments via Cromwell. Access to the pipelines and data via the cloud gives small labs the ability to use the data or software without access to institutional compute clusters.
Standardization of the computational methodologies for analysis and quality control leads to comparable results from different ENCODE collections, a prerequisite for successful integrative analyses.}, } @article {pmid37066386, year = {2023}, author = {Olson, RH and Kalafut, NC and Wang, D}, title = {MANGEM: a web app for Multimodal Analysis of Neuronal Gene expression, Electrophysiology and Morphology.}, journal = {bioRxiv : the preprint server for biology}, volume = {}, number = {}, pages = {}, pmid = {37066386}, issn = {2692-8205}, support = {R21 NS127432/NS/NINDS NIH HHS/United States ; R21 NS128761/NS/NINDS NIH HHS/United States ; P50 HD105353/HD/NICHD NIH HHS/United States ; R01 AG067025/AG/NIA NIH HHS/United States ; R03 NS123969/NS/NINDS NIH HHS/United States ; RF1 MH128695/MH/NIMH NIH HHS/United States ; }, abstract = {Single-cell techniques have enabled the acquisition of multi-modal data, particularly for neurons, to characterize cellular functions. Patch-seq, for example, combines patch-clamp recording, cell imaging, and single-cell RNA-seq to obtain electrophysiology, morphology, and gene expression data from a single neuron. While these multi-modal data offer potential insights into neuronal functions, they can be heterogeneous and noisy. To address this, machine-learning methods have been used to align cells from different modalities onto a low-dimensional latent space, revealing multi-modal cell clusters. However, the use of those methods can be challenging for biologists and neuroscientists without computational expertise and also requires suitable computing infrastructure for computationally expensive methods. To address these issues, we developed a cloud-based web application, MANGEM (Multimodal Analysis of Neuronal Gene expression, Electrophysiology, and Morphology) at https://ctc.waisman.wisc.edu/mangem. MANGEM provides a step-by-step accessible and user-friendly interface to machine-learning alignment methods of neuronal multi-modal data while enabling real-time visualization of characteristics of raw and aligned cells. It can be run asynchronously for large-scale data alignment, provides users with various downstream analyses of aligned cells, and visualizes the analytic results, such as identifying multi-modal clusters of cells and detecting genes correlated with electrophysiological and morphological features. We demonstrated the usage of MANGEM by aligning Patch-seq multimodal data of neuronal cells in the mouse visual cortex.}, } @article {pmid37064531, year = {2023}, author = {Horsley, JJ and Thomas, RH and Chowdhury, FA and Diehl, B and McEvoy, AW and Miserocchi, A and de Tisi, J and Vos, SB and Walker, MC and Winston, GP and Duncan, JS and Wang, Y and Taylor, PN}, title = {Complementary structural and functional abnormalities to localise epileptogenic tissue.}, journal = {ArXiv}, volume = {}, number = {}, pages = {}, pmid = {37064531}, issn = {2331-8422}, support = {/WT_/Wellcome Trust/United Kingdom ; MR/V034758/1/MRC_/Medical Research Council/United Kingdom ; U01 NS090407/NS/NINDS NIH HHS/United States ; }, abstract = {BACKGROUND: When investigating suitability for epilepsy surgery, people with drug-refractory focal epilepsy may have intracranial EEG (iEEG) electrodes implanted to localise seizure onset. Diffusion-weighted magnetic resonance imaging (dMRI) may be acquired to identify key white matter tracts for surgical avoidance.
Here, we investigate whether structural connectivity abnormalities, inferred from dMRI, may be used in conjunction with functional iEEG abnormalities to aid localisation of the epileptogenic zone (EZ), improving surgical outcomes in epilepsy.

METHODS: We retrospectively investigated data from 43 patients with epilepsy who had surgery following iEEG. Twenty-five patients (58%) were free from disabling seizures (ILAE 1 or 2) at one year. Interictal iEEG functional and dMRI structural connectivity abnormalities were quantified by comparison to a normative map and healthy controls. We explored whether resection of the maximal abnormalities was related to improved surgical outcomes, in both modalities individually and concurrently. Additionally, we suggest how connectivity abnormalities may inform the placement of iEEG electrodes pre-surgically using a patient case study.
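To illustrate the abnormality quantification step, the following sketch (illustrative only, not the authors' code) converts regional patient values to z-scores against a normative mean and standard deviation, then asks whether the most abnormal regions fell inside the resection; all arrays are synthetic.

```python
# Regional abnormality as z-scores against a normative map, plus a check
# of whether the top-k most abnormal regions were resected (toy data).
import numpy as np

def abnormality_z(patient, norm_mean, norm_std):
    # Per-region deviation from the normative distribution.
    return (patient - norm_mean) / norm_std

def max_abnormality_resected(z, resected, top_k=5):
    # True if any of the top-k most abnormal regions was resected.
    top = np.argsort(np.abs(z))[::-1][:top_k]
    return bool(resected[top].any())

rng = np.random.default_rng(0)
patient = rng.normal(0.5, 1.0, size=50)        # toy regional connectivity
norm_mean, norm_std = np.zeros(50), np.ones(50)
resected = np.zeros(50, dtype=bool)
resected[:10] = True                           # toy resection mask
z = abnormality_z(patient, norm_mean, norm_std)
print(max_abnormality_resected(z, resected))
```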

FINDINGS: Seizure freedom was 15 times more likely in patients with resection of maximal connectivity and iEEG abnormalities (p=0.008). Both modalities separately distinguished patient surgical outcome groups and, when used simultaneously, a decision tree correctly separated 36 of 43 (84%) patients.
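The two-modality combination described above can be sketched with a shallow decision tree over per-patient abnormality scores; here both the features and the outcome labels are synthetic stand-ins, not the study data.

```python
# Shallow decision tree over two modality-level abnormality scores.
import numpy as np
from sklearn.tree import DecisionTreeClassifier

rng = np.random.default_rng(1)
n = 43
# Per patient: resected-region abnormality load from dMRI structural
# connectivity and from interictal iEEG (both synthetic).
X = rng.normal(size=(n, 2))
y = (X[:, 0] + X[:, 1] + rng.normal(scale=0.5, size=n) > 0).astype(int)

clf = DecisionTreeClassifier(max_depth=2, random_state=0).fit(X, y)
print((clf.predict(X) == y).mean())  # training accuracy on toy data
```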

INTERPRETATION: Our results suggest that both connectivity and iEEG abnormalities may localise epileptogenic tissue, and that these two modalities may provide complementary information in pre-surgical evaluations.

FUNDING: This research was funded by UKRI, CDT in Cloud Computing for Big Data, NIH, MRC, Wellcome Trust and Epilepsy Research UK.}, } @article {pmid37063644, year = {2023}, author = {Dong, C and Li, TZ and Xu, K and Wang, Z and Maldonado, F and Sandler, K and Landman, BA and Huo, Y}, title = {Characterizing browser-based medical imaging AI with serverless edge computing: towards addressing clinical data security constraints.}, journal = {Proceedings of SPIE--the International Society for Optical Engineering}, volume = {12469}, number = {}, pages = {}, pmid = {37063644}, issn = {0277-786X}, support = {UL1 TR000445/TR/NCATS NIH HHS/United States ; R01 EB017230/EB/NIBIB NIH HHS/United States ; U01 CA152662/CA/NCI NIH HHS/United States ; F30 CA275020/CA/NCI NIH HHS/United States ; R01 CA253923/CA/NCI NIH HHS/United States ; UL1 RR024975/RR/NCRR NIH HHS/United States ; U01 CA196405/CA/NCI NIH HHS/United States ; }, abstract = {Artificial intelligence (AI) has been widely introduced to various medical imaging applications ranging from disease visualization to medical decision support. However, data privacy has become an essential concern in the clinical practice of deploying deep learning algorithms through cloud computing. The sensitivity of patient health information (PHI) commonly limits network transfer, installation of bespoke desktop software, and access to computing resources. Serverless edge computing sheds light on privacy-preserved model distribution, maintaining both the high flexibility of cloud computing and the security of local deployment. In this paper, we propose a browser-based, cross-platform, and privacy-preserved medical imaging AI deployment system working on consumer-level hardware via serverless edge computing. Briefly, we implement this system by deploying a 3D medical image segmentation model for computed tomography (CT) based lung cancer screening. We further examine tradeoffs in model complexity and data size by characterizing the speed, memory usage, and limitations across various operating systems and browsers. Our implementation achieves a deployment with (1) a 3D convolutional neural network (CNN) on CT volumes (256×256×256 resolution), (2) an average runtime of 80 seconds across Firefox v.102.0.1/Chrome v.103.0.5060.114/Microsoft Edge v.103.0.1264.44 and 210 seconds on Safari v.14.1.1, and (3) an average memory usage of 1.5 GB on Microsoft Windows laptops, Linux workstation, and Apple Mac laptops. In conclusion, this work presents a privacy-preserved solution for medical imaging AI applications that minimizes the risk of PHI exposure.
We characterize the tools, architectures, and parameters of our framework to facilitate the translation of modern deep learning methods into routine clinical care.}, } @article {pmid37055366, year = {2023}, author = {Zhang, H and Wang, LC and Chaudhuri, S and Pickering, A and Usvyat, L and Larkin, J and Waguespack, P and Kuang, Z and Kooman, JP and Maddux, FW and Kotanko, P}, title = {Real-time prediction of intradialytic hypotension using machine learning and cloud computing infrastructure.}, journal = {Nephrology, dialysis, transplantation : official publication of the European Dialysis and Transplant Association - European Renal Association}, volume = {38}, number = {7}, pages = {1761-1769}, pmid = {37055366}, issn = {1460-2385}, mesh = {Humans ; *Kidney Failure, Chronic/therapy/complications ; Prospective Studies ; Cloud Computing ; *Hypotension/diagnosis/etiology ; Renal Dialysis/adverse effects ; Blood Pressure ; }, abstract = {BACKGROUND: In maintenance hemodialysis patients, intradialytic hypotension (IDH) is a frequent complication that has been associated with poor clinical outcomes. Prediction of IDH may facilitate timely interventions and eventually reduce IDH rates.

METHODS: We developed a machine learning model to predict IDH in in-center hemodialysis patients 15-75 min in advance. IDH was defined as systolic blood pressure (SBP) <90 mmHg. Demographic, clinical, treatment-related and laboratory data were retrieved from electronic health records and merged with intradialytic machine data that were sent in real-time to the cloud. For model development, dialysis sessions were randomly split into training (80%) and testing (20%) sets. The area under the receiver operating characteristic curve (AUROC) was used as a measure of the model's predictive performance.
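The evaluation protocol described here (a random 80/20 split of sessions with AUROC as the performance measure) can be mirrored in miniature as below. The three features echo the top predictors highlighted by the study, but the data are synthetic and the gradient-boosting model is a stand-in, not the authors' model.

```python
# 80/20 split plus AUROC, on synthetic stand-in features.
import numpy as np
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import train_test_split

rng = np.random.default_rng(0)
n = 5000
X = np.column_stack([
    rng.normal(120, 20, n),   # most recent intradialytic SBP (mmHg)
    rng.beta(2, 8, n),        # historical IDH rate
    rng.normal(100, 15, n),   # mean nadir SBP, previous 10 sessions
])
y = ((X[:, 0] < 105) & (X[:, 1] > 0.2) | (X[:, 2] < 90)).astype(int)

X_tr, X_te, y_tr, y_te = train_test_split(
    X, y, test_size=0.2, random_state=42)
model = GradientBoostingClassifier().fit(X_tr, y_tr)
print(roc_auc_score(y_te, model.predict_proba(X_te)[:, 1]))
```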

RESULTS: We utilized data from 693 patients who contributed 42 656 hemodialysis sessions and 355 693 intradialytic SBP measurements. IDH occurred in 16.2% of hemodialysis treatments. Our model predicted IDH 15-75 min in advance with an AUROC of 0.89. Top IDH predictors were the most recent intradialytic SBP and IDH rate, as well as mean nadir SBP of the previous 10 dialysis sessions.

CONCLUSIONS: Real-time prediction of IDH during an ongoing hemodialysis session is feasible and has a clinically actionable predictive performance. If and to what degree this predictive information facilitates the timely deployment of preventive interventions and translates into lower IDH rates and improved patient outcomes warrants prospective studies.}, } @article {pmid37055332, year = {2023}, author = {Servida, F and Fischer, M and Delémont, O and Souvignet, TR}, title = {Ok Google, Start a Fire. IoT devices as witnesses and actors in fire investigations.}, journal = {Forensic science international}, volume = {348}, number = {}, pages = {111674}, doi = {10.1016/j.forsciint.2023.111674}, pmid = {37055332}, issn = {1872-6283}, abstract = {Fire incidents are amongst the most destructive events an investigator might encounter, completely transforming a scene with most of the objects left in ashes or highly damaged. Until now, fire investigations relied heavily on burn patterns and electrical artifacts to find possible starting locations, as well as witness statements and more recently witness imagery. As Internet of Things (IoT) devices, often seen as connected smart devices, become more common, the various sensors embedded within them provide a novel source of traces about the environment and events within. They collect and store information in different locations, often not touched by the event, such as remote servers (cloud) or companion smartphones, widening the investigation field for fire incidents. This work presents two controlled fire incidents in apartments that we furnished, equipped with IoT devices, and subsequently burnt. We studied the traces retrievable from the objects themselves after the incident, the companion smartphone apps, and the cloud, and assessed the value of the information they conveyed. This research highlighted the pertinence of considering traces from IoT devices in the forensic process of fire investigation.}, } @article {pmid37053173, year = {2023}, author = {Asim Shahid, M and Alam, MM and Mohd Su'ud, M}, title = {Improved accuracy and less fault prediction errors via modified sequential minimal optimization algorithm.}, journal = {PloS one}, volume = {18}, number = {4}, pages = {e0284209}, pmid = {37053173}, issn = {1932-6203}, mesh = {Bayes Theorem ; *Algorithms ; *Machine Learning ; Random Forest ; Support Vector Machine ; }, abstract = {Cloud computing is among the fastest-growing technologies in the computer industry, owing to the benefits and opportunities it offers, and addressing its difficulties and issues makes more users likely to accept and use the technology. The proposed research compares the machine learning (ML) algorithms Naïve Bayes (NB), Library Support Vector Machine (LibSVM), Multinomial Logistic Regression (MLR), Sequential Minimal Optimization (SMO), K-Nearest Neighbor (KNN), and Random Forest (RF) to determine which classifier gives better accuracy and fewer fault prediction errors. In this research, on the secondary data (CPU-Mem Mono), the NB classifier gives the highest accuracy and the fewest fault prediction errors in terms of 80/20 (77.01%), 70/30 (76.05%), and 5-fold cross-validation (74.88%), and on (CPU-Mem Multi) in terms of 80/20 (89.72%), 70/30 (90.28%), and 5-fold cross-validation (92.83%).
Furthermore, on (HDD Mono) the SMO classifier gives the highest accuracy and the fewest fault prediction errors in terms of 80/20 (87.72%), 70/30 (89.41%), and 5-fold cross-validation (88.38%), and on (HDD-Multi) in terms of 80/20 (93.64%), 70/30 (90.91%), and 5-fold cross-validation (88.20%). On the primary data, the RF classifier gives the highest accuracy and the fewest fault prediction errors in terms of 80/20 (97.14%), 70/30 (96.19%), and 5-fold cross-validation (95.85%), but its algorithm complexity (0.17 seconds) is not good. In terms of 80/20 (95.71%), 70/30 (95.71%), and 5-fold cross-validation (95.71%), SMO has the second-highest accuracy and fewest fault prediction errors, and its algorithm complexity is good (0.3 seconds). The difference in accuracy between RF and SMO is only 0.13%, and the difference in time complexity is 14 seconds. We therefore decided to modify SMO. Finally, the Modified Sequential Minimal Optimization (MSMO) algorithm is proposed, achieving the highest accuracy and fewest fault prediction errors in terms of 80/20 (96.42%), 70/30 (96.42%), and 5-fold cross-validation (96.50%).}, } @article {pmid37050834, year = {2023}, author = {Wang, Z and Yu, X and Xue, P and Qu, Y and Ju, L}, title = {Research on Medical Security System Based on Zero Trust.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {7}, pages = {}, pmid = {37050834}, issn = {1424-8220}, support = {2018YFB0803401//the National Key Research and Development Plan of China/ ; 328202203//the Fundamental Research Funds for the Central Universities/ ; 2019M650606//China Postdoctoral Science Foundation funded project/ ; }, mesh = {Humans ; *Trust ; *Computer Security ; Big Data ; Computer Simulation ; Cloud Computing ; }, abstract = {With the rapid development of Internet of Things technology, cloud computing, and big data, the combination of medical systems and information technology has become increasingly close. However, the emergence of intelligent medical systems has brought a series of network security threats and hidden dangers, including data leakage and remote attacks, which can directly threaten patients' lives. To ensure the security of medical information systems and expand the application of zero trust in the medical field, we combined the medical system with the zero-trust security system to propose a zero-trust medical security system. In addition, in its dynamic access control module, based on the RBAC model and the calculation of user behavior risk value and trust, an access control model based on subject behavior evaluation under zero-trust conditions (ABEAC) was designed to improve the security of medical equipment and data. Finally, the feasibility of the system is verified through a simulation experiment.}, } @article {pmid37050740, year = {2023}, author = {Kim, K and Alshenaifi, IM and Ramachandran, S and Kim, J and Zia, T and Almorjan, A}, title = {Cybersecurity and Cyber Forensics for Smart Cities: A Comprehensive Literature Review and Survey.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {7}, pages = {}, pmid = {37050740}, issn = {1424-8220}, support = {SRC-PR2-05//Naif Arab University for Security Sciences/ ; }, abstract = {Smart technologies, such as the Internet of Things (IoT), cloud computing, and artificial intelligence (AI), are being adopted in cities and transforming them into smart cities.
In smart cities, various network technologies, such as the Internet and IoT, are combined to exchange real-time information, making the everyday lives of their residents more convenient. However, there is a lack of systematic research on cybersecurity and cyber forensics in smart cities. This paper presents a comprehensive review and survey of cybersecurity and cyber forensics for smart cities. We analysed 154 papers that were published from 2015 to 2022 and proposed a new framework based on a decade of related research papers. We identified four major areas and eleven sub-areas for smart cities. We found that smart homes and the IoT were the most active research areas within the cybersecurity field. Additionally, we found that research on cyber forensics for smart cities was relatively limited compared to that on cybersecurity. Since 2020, there have been many studies on the IoT (which is a technological component of smart cities) that have utilized machine learning and deep learning. Due to the transmission of large-scale data through IoT devices in smart cities, ML and DL are expected to continue playing critical roles in smart city research.}, } @article {pmid37050561, year = {2023}, author = {Elbagoury, BM and Vladareanu, L and Vlădăreanu, V and Salem, AB and Travediu, AM and Roushdy, MI}, title = {A Hybrid Stacked CNN and Residual Feedback GMDH-LSTM Deep Learning Model for Stroke Prediction Applied on Mobile AI Smart Hospital Platform.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {7}, pages = {}, pmid = {37050561}, issn = {1424-8220}, mesh = {Humans ; Artificial Intelligence ; Feedback ; *Deep Learning ; *Stroke/diagnosis ; Hospitals ; }, abstract = {Artificial intelligence (AI) techniques for intelligent mobile computing in healthcare has opened up new opportunities in healthcare systems. Combining AI techniques with the existing Internet of Medical Things (IoMT) will enhance the quality of care that patients receive at home remotely and the successful establishment of smart living environments. Building a real AI for mobile AI in an integrated smart hospital environment is a challenging problem due to the complexities of receiving IoT medical sensors data, data analysis, and deep learning algorithm complexity programming for mobile AI engine implementation AI-based cloud computing complexities, especially when we tackle real-time environments of AI technologies. In this paper, we propose a new mobile AI smart hospital platform architecture for stroke prediction and emergencies. In addition, this research is focused on developing and testing different modules of integrated AI software based on XAI architecture, this is for the mobile health app as an independent expert system or as connected with a simulated environment of an AI-cloud-based solution. The novelty is in the integrated architecture and results obtained in our previous works and this extended research on hybrid GMDH and LSTM deep learning models for the proposed artificial intelligence and IoMT engine for mobile health edge computing technology. Its main goal is to predict heart-stroke disease. Current research is still missing a mobile AI system for heart/brain stroke prediction during patient emergency cases. This research work implements AI algorithms for stroke prediction and diagnosis. The hybrid AI in connected health is based on a stacked CNN and group handling method (GMDH) predictive analytics model, enhanced with an LSTM deep learning module for biomedical signals prediction. 
The techniques developed depend on a dataset of electromyography (EMG) signals, which provides a significant source of information for the identification of normal and abnormal motions in a stroke scenario. The resulting artificial intelligence mHealth app is an innovation beyond the state of the art, and the proposed techniques achieve high accuracy, as the stacked CNN reaches almost 98% accuracy for stroke diagnosis. The GMDH neural network proves to be a good technique for monitoring the EMG signal of the same patient case, with average signal-prediction accuracies ranging from 98.60% to 96.68%. Moreover, extending the GMDH model with a hybrid LSTM deep learning model with dense layers significantly improved the prediction results, which reach an average of 99%.}, } @article {pmid37050551, year = {2023}, author = {Fahimullah, M and Philippe, G and Ahvar, S and Trocan, M}, title = {Simulation Tools for Fog Computing: A Comparative Analysis.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {7}, pages = {}, pmid = {37050551}, issn = {1424-8220}, abstract = {Fog Computing (FC) was introduced to offer resources closer to the users. Researchers propose different solutions to make FC mature and use simulators for evaluating their solutions at early stages. In this paper, we compare different FC simulators based on their technical and non-technical characteristics. In addition, a practical comparison is conducted to compare the three main FC simulators based on their performance, such as execution time and CPU and memory usage, for running different applications. The analysis can be helpful for researchers to select the appropriate simulator and platform to evaluate their solutions on different use cases. Furthermore, open issues and challenges for FC simulators are discussed that require attention and need to be addressed in the future.}, } @article {pmid37050548, year = {2023}, author = {Singhal, S and Athithan, S and Alomar, MA and Kumar, R and Sharma, B and Srivastava, G and Lin, JC}, title = {Energy Aware Load Balancing Framework for Smart Grid Using Cloud and Fog Computing.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {7}, pages = {}, pmid = {37050548}, issn = {1424-8220}, abstract = {Data centers are producing a lot of data as cloud-based smart grids replace traditional grids. The number of automated systems has increased rapidly, which in turn necessitates the rise of cloud computing. Cloud computing helps enterprises offer services cheaply and efficiently. Despite the challenges of resource management, longer response and processing times, and higher energy consumption, more people are using cloud computing. Fog computing extends cloud computing. It adds cloud services that minimize traffic, increase security, and speed up processes. Cloud and fog computing help smart grids save energy by aggregating and distributing the submitted requests. The paper discusses a load-balancing approach in Smart Grid using Rock Hyrax Optimization (RHO) to optimize response time and energy consumption. The proposed algorithm assigns tasks to virtual machines for execution and shuts off unused virtual machines, reducing the energy consumed by virtual machines. The proposed model is implemented on the CloudAnalyst simulator, and the results demonstrate that the proposed method has a better and quicker response time with lower energy requirements as compared with both static and dynamic algorithms.
The suggested algorithm reduces processing time by 26%, response time by 15%, energy consumption by 29%, cost by 6%, and delay by 14%.}, } @article {pmid37050477, year = {2023}, author = {García, E and Quiles, E and Correcher, A}, title = {Distributed Intelligent Battery Management System Using a Real-World Cloud Computing System.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {7}, pages = {}, pmid = {37050477}, issn = {1424-8220}, abstract = {In this work, a decentralized but synchronized real-world system for smart battery management was designed by using a general controller with cloud computing capability, four charge regulators, and a set of sensorized battery monitors with networking and Bluetooth capabilities. Currently, for real-world applications, battery management systems (BMSs) can be used in the form of distributed control systems in which general controllers, charge regulators, and smart monitors and sensors are integrated, such as those proposed in this work. Such systems allow more precise estimation of a large set of important parameters, such as the state of charge (SOC), state of health (SOH), current, voltage, and temperature, seeking the safety and the extension of the useful life of energy storage systems based on battery banks. The system used is a paradigmatic real-world example of the so-called intelligent battery management systems. One of the contributions made in this work is the realization of a distributed design of a BMS, which adds the benefit of increased system security compared to a fully centralized BMS structure. Another research contribution made in this work is the development of a methodical modeling procedure based on Petri Nets, which establishes, in a visible, organized, and precise way, the set of conditions that will determine the operation of the BMS. If this modeling is not carried out, the threshold values and their conditions remain scattered, not very transparent, and difficult to deal with in an aggregate way.}, } @article {pmid37050454, year = {2023}, author = {Akturk, E and Popescu, SC and Malambo, L}, title = {ICESat-2 for Canopy Cover Estimation at Large-Scale on a Cloud-Based Platform.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {7}, pages = {}, pmid = {37050454}, issn = {1424-8220}, abstract = {Forest canopy cover is an essential biophysical parameter of ecological significance, especially for characterizing woodlands and forests. This research focused on using data from the ICESat-2/ATLAS spaceborne lidar sensor, a photon-counting altimetry system, to map the forest canopy cover over a large country extent. The study proposed a novel approach to compute categorized canopy cover using photon-counting data and available ancillary Landsat images to build the canopy cover model. In addition, this research tested a cloud-mapping platform, the Google Earth Engine (GEE), as an example of a large-scale study. The canopy cover map of the Republic of Türkiye produced from this study has an average accuracy of over 70%. Even though the results were promising, issues caused by the ancillary data were found to negatively affect the overall success. Moreover, while GEE offered many benefits, such as user-friendliness and convenience, it had processing limits that posed challenges for large-scale studies. Using weak or strong beam segments separately did not show a significant difference in estimating canopy cover.
Briefly, this study demonstrates the potential of using photon-counting data and GEE for mapping forest canopy cover at a large scale.}, } @article {pmid37046022, year = {2023}, author = {Hemati, M and Hasanlou, M and Mahdianpari, M and Mohammadimanesh, F}, title = {Iranian wetland inventory map at a spatial resolution of 10 m using Sentinel-1 and Sentinel-2 data on the Google Earth Engine cloud computing platform.}, journal = {Environmental monitoring and assessment}, volume = {195}, number = {5}, pages = {558}, pmid = {37046022}, issn = {1573-2959}, mesh = {*Wetlands ; Iran ; *Cloud Computing ; Search Engine ; Environmental Monitoring/methods ; }, abstract = {Detailed wetland inventories and information about the spatial arrangement and the extent of wetland types across the Earth's surface are crucially important for resource assessment and sustainable management. In addition, it is crucial to update these inventories due to the highly dynamic characteristics of the wetlands. Remote sensing technologies capturing high-resolution and multi-temporal views of landscapes are incredibly beneficial in wetland mapping compared to traditional methods. Taking advantage of the Google Earth Engine's computational power and multi-source earth observation data from the Sentinel-2 multi-spectral sensor and the Sentinel-1 radar, we generated a 10 m nationwide wetland inventory map for Iran. The whole country is mapped using an object-based image processing framework, containing SNIC superpixel segmentation and a Random Forest classifier, which was performed for four different ecological zones of Iran separately. Reference data were provided by different sources and through both field and office-based methods. Almost 70% of these data were used for the training stage and the other 30% for evaluation. The whole-map overall accuracy was 96.39%, and the producer's accuracy for wetland classes ranged from nearly 65 to 99%. It is estimated that 22,384 km² of Iran are covered with water bodies and wetland classes, and emergent and shrub-dominated are the most common wetland classes in Iran. Considering the water crisis that has begun in Iran, the resulting much-needed map of Iranian wetland sites offers remarkable information about wetland boundaries and the spatial distribution of wetland species, and therefore it is helpful for both governmental and commercial sectors.}, } @article {pmid37025550, year = {2023}, author = {Chlasta, K and Sochaczewski, P and Wójcik, GM and Krejtz, I}, title = {Neural simulation pipeline: Enabling container-based simulations on-premise and in public clouds.}, journal = {Frontiers in neuroinformatics}, volume = {17}, number = {}, pages = {1122470}, pmid = {37025550}, issn = {1662-5196}, abstract = {In this study, we explore the simulation setup in computational neuroscience. We use GENESIS, a general purpose simulation engine for sub-cellular components and biochemical reactions, realistic neuron models, large neural networks, and system-level models. GENESIS supports developing and running computer simulations but leaves a gap for setting up today's larger and more complex models. The field of realistic models of brain networks has outgrown the simplicity of the earliest models. The challenges include managing the complexity of software dependencies and various models, setting up model parameter values, storing the input parameters alongside the results, and providing execution statistics.
Moreover, in the high performance computing (HPC) context, public cloud resources are becoming an alternative to expensive on-premises clusters. We present the Neural Simulation Pipeline (NSP), which facilitates large-scale computer simulations and their deployment to multiple computing infrastructures using an infrastructure-as-code (IaC) containerization approach. The authors demonstrate the effectiveness of NSP in a pattern recognition task programmed with GENESIS, through a custom-built visual system, RetNet(8 × 5,1), that uses biologically plausible Hodgkin-Huxley spiking neurons. We evaluate the pipeline by performing 54 simulations executed on-premise, at the Hasso Plattner Institute's (HPI) Future Service-Oriented Computing (SOC) Lab, and through the Amazon Web Services (AWS), the biggest public cloud service provider in the world. We report on the non-containerized and containerized execution with Docker, as well as present the cost per simulation in AWS. The results show that our neural simulation pipeline can reduce entry barriers to neural simulations, making them more practical and cost-effective.}, } @article {pmid37023063, year = {2023}, author = {Hou, YF and Ge, F and Dral, PO}, title = {Explicit Learning of Derivatives with the KREG and pKREG Models on the Example of Accurate Representation of Molecular Potential Energy Surfaces.}, journal = {Journal of chemical theory and computation}, volume = {19}, number = {8}, pages = {2369-2379}, doi = {10.1021/acs.jctc.2c01038}, pmid = {37023063}, issn = {1549-9626}, abstract = {The KREG and pKREG models were proven to enable accurate learning of multidimensional single-molecule surfaces of quantum chemical properties such as ground-state potential energies, excitation energies, and oscillator strengths. These models are based on kernel ridge regression (KRR) with the Gaussian kernel function and employ a relative-to-equilibrium (RE) global molecular descriptor, while pKREG is designed to enforce invariance under atom permutations with a permutationally invariant kernel. Here we extend these two models to also explicitly incorporate the derivative information from the training data, which greatly improves their accuracy. We demonstrate on the example of learning potential energies and energy gradients that KREG and pKREG models are better than or on par with state-of-the-art machine learning models. We also found that in challenging cases both energy and energy gradient labels should be learned to properly model potential energy surfaces, and learning only energies or gradients is insufficient. The models' open-source implementation is freely available in the MLatom package for general-purpose atomistic machine learning simulations, which can also be performed on the MLatom@XACS cloud computing service.}, } @article {pmid37018339, year = {2024}, author = {Yu, H and Zhang, Q and Yang, LT}, title = {An Edge-Cloud-Aided Private High-Order Fuzzy C-Means Clustering Algorithm in Smart Healthcare.}, journal = {IEEE/ACM transactions on computational biology and bioinformatics}, volume = {21}, number = {4}, pages = {1083-1092}, doi = {10.1109/TCBB.2022.3233380}, pmid = {37018339}, issn = {1557-9964}, mesh = {*Fuzzy Logic ; *Algorithms ; Humans ; Cluster Analysis ; *Cloud Computing ; Deep Learning ; Delivery of Health Care ; }, abstract = {Smart healthcare has emerged to provide healthcare services using data analysis techniques. In particular, clustering plays an indispensable role in analyzing healthcare records.
However, large multi-modal healthcare data imposes great challenges on clustering. Specifically, it is hard for traditional approaches to obtain desirable results for healthcare data clustering since they are not able to work with multi-modal data. This paper presents a new high-order multi-modal learning approach using multimodal deep learning and the Tucker decomposition (F-HoFCM). Furthermore, we propose an edge-cloud-aided private scheme that improves clustering efficiency by embedding parts of the computation in edge resources. Specifically, the computationally intensive tasks, such as parameter updating with the high-order back-propagation algorithm and clustering through high-order fuzzy c-means, are processed in a centralized location with cloud computing. The other tasks, such as multi-modal data fusion and Tucker decomposition, are performed at the edge resources. Since the feature fusion and Tucker decomposition are nonlinear operations, the cloud cannot obtain the raw data, thus protecting privacy. Experimental results show that the presented approach produces significantly more accurate results than the existing high-order fuzzy c-means (HOFCM) on multi-modal healthcare datasets, and clustering efficiency is significantly improved by the developed edge-cloud-aided private healthcare system.}, } @article {pmid37015671, year = {2024}, author = {Disabato, S and Roveri, M}, title = {Tiny Machine Learning for Concept Drift.}, journal = {IEEE transactions on neural networks and learning systems}, volume = {35}, number = {6}, pages = {8470-8481}, doi = {10.1109/TNNLS.2022.3229897}, pmid = {37015671}, issn = {2162-2388}, abstract = {Tiny machine learning (TML) is a new research area whose goal is to design machine and deep learning (DL) techniques able to operate in embedded systems and Internet-of-Things (IoT) units, hence satisfying the severe technological constraints on memory, computation, and energy characterizing these pervasive devices. Interestingly, the related literature has mainly focused on reducing the computational and memory demand of the inference phase of machine and deep learning models, while training is typically assumed to be carried out in cloud or edge computing systems (due to the larger memory and computational requirements). This assumption results in TML solutions that might become obsolete when the process generating the data is affected by concept drift (e.g., due to periodicity or seasonality effects, faults or malfunctioning affecting sensors or actuators, or changes in the users' behavior), a common situation in real-world application scenarios. For the first time in the literature, this article introduces a TML for concept drift (TML-CD) solution based on deep learning feature extractors and a k-nearest neighbors (k-NN) classifier integrating a hybrid adaptation module able to deal with concept drift affecting the data-generating process. This adaptation module continuously updates (in a passive way) the knowledge base of TML-CD and, at the same time, employs a change detection test (CDT) to inspect for changes (in an active way) to quickly adapt to concept drift by removing obsolete knowledge. 
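As an illustration of the hybrid passive/active adaptation described in the TML-CD entry above (pmid 37015671), here is a minimal sketch of a k-NN knowledge base with passive updates plus a crude error-rate change detector; the window size, threshold, and eviction policy are illustrative assumptions, not the paper's actual CDT.

```python
from collections import deque
import numpy as np

class DriftAwareKNN:
    """k-NN with passive knowledge-base updates and a simple active drift test."""
    def __init__(self, k=3, capacity=200, window=50, threshold=0.3):
        self.k, self.window, self.threshold = k, window, threshold
        self.X = deque(maxlen=capacity)   # feature vectors (e.g., DL embeddings)
        self.y = deque(maxlen=capacity)   # labels
        self.errors = deque(maxlen=window)

    def predict(self, x):
        X = np.array(self.X)
        nearest = np.argsort(np.linalg.norm(X - x, axis=1))[:self.k]
        labels = [self.y[i] for i in nearest]
        return max(set(labels), key=labels.count)  # majority vote

    def update(self, x, y_true):
        # Passive adaptation: always absorb the new labelled sample.
        y_hat = self.predict(x) if self.X else None
        self.X.append(x); self.y.append(y_true)
        if y_hat is not None:
            self.errors.append(int(y_hat != y_true))
        # Active adaptation: if the recent error rate jumps, drop old knowledge.
        if len(self.errors) == self.window and np.mean(self.errors) > self.threshold:
            for _ in range(len(self.X) // 2):   # evict the oldest half
                self.X.popleft(); self.y.popleft()
            self.errors.clear()
```

On an MCU the deques would be fixed-size arrays and the embeddings would come from a frozen feature extractor; the control flow, however, is the same.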
Experimental results on both image and audio benchmarks show the effectiveness of the proposed solution, whilst the porting of TML-CD to three off-the-shelf micro-controller units (MCUs) shows its feasibility in real-world pervasive systems.}, } @article {pmid37012744, year = {2023}, author = {Wang, Y and Li, J and Wang, H and Yan, Z and Xu, Z and Li, C and Zhao, Z and Raza, SA}, title = {Non-contact wearable synchronous measurement method of electrocardiogram and seismocardiogram signals.}, journal = {The Review of scientific instruments}, volume = {94}, number = {3}, pages = {034101}, doi = {10.1063/5.0120722}, pmid = {37012744}, issn = {1089-7623}, mesh = {Humans ; *Artificial Intelligence ; Signal Processing, Computer-Assisted ; Electrocardiography/methods ; Heart ; *Wearable Electronic Devices ; }, abstract = {Cardiovascular disease is one of the leading threats to human lives, and its fatality rate continues to rise year by year. Driven by the development of advanced information technologies, such as big data, cloud computing, and artificial intelligence, remote/distributed cardiac healthcare has a promising future. The traditional dynamic cardiac health monitoring method, based solely on electrocardiogram (ECG) signals, has obvious deficiencies in comfort, informativeness, and accuracy under motion. Therefore, a non-contact, compact, wearable, synchronous ECG and seismocardiogram (SCG) measuring system, based on a pair of capacitive coupling electrodes with ultra-high input impedance and a high-resolution accelerometer, was developed in this work; it can collect ECG and SCG signals at the same point simultaneously through multiple layers of clothing. Meanwhile, the driven-right-leg electrode for ECG measurement is replaced by AgCl fabric sewn to the outside of the clothing, realizing fully gel-free ECG measurement. In addition, synchronous ECG and SCG signals at multiple points on the chest surface were measured, and recommended measuring points were derived from their amplitude characteristics and timing-sequence correspondence analysis. Finally, the empirical mode decomposition algorithm was used to adaptively filter motion artifacts within the ECG and SCG signals, enhancing measurement performance under motion. The results demonstrate that the proposed non-contact, wearable cardiac health monitoring system can effectively collect ECG and SCG synchronously under various measuring situations.}, } @article {pmid37010950, year = {2023}, author = {Ansari, M and White, AD}, title = {Serverless Prediction of Peptide Properties with Recurrent Neural Networks.}, journal = {Journal of chemical information and modeling}, volume = {63}, number = {8}, pages = {2546-2553}, pmid = {37010950}, issn = {1549-960X}, support = {R35 GM137966/GM/NIGMS NIH HHS/United States ; }, mesh = {Reproducibility of Results ; *Neural Networks, Computer ; *Peptides ; Machine Learning ; Cloud Computing ; }, abstract = {We present three deep learning sequence-based prediction models for peptide properties, including hemolysis, solubility, and resistance to nonspecific interactions, that achieve results comparable to state-of-the-art models. Our sequence-based solubility predictor, MahLooL, outperforms the current state-of-the-art methods for short peptides. These models are implemented as a static website without the use of a dedicated server or cloud computing. Web-based models like this allow for accessible and effective reproducibility. 
Most existing approaches rely on third-party servers that typically require upkeep and maintenance. Our predictive models do not require servers, require no installation of dependencies, and work across a range of devices. The models are based on bidirectional recurrent neural networks. This serverless approach is a demonstration of edge machine learning that removes the dependence on cloud providers. The code and models are accessible at https://github.com/ur-whitelab/peptide-dashboard.}, } @article {pmid37007983, year = {2023}, author = {Elouali, A and Mora Mora, H and Mora-Gimeno, FJ}, title = {Data transmission reduction formalization for cloud offloading-based IoT systems.}, journal = {Journal of cloud computing (Heidelberg, Germany)}, volume = {12}, number = {1}, pages = {48}, pmid = {37007983}, issn = {2192-113X}, abstract = {Computation offloading is the solution for IoT devices with limited resources and high processing requirements. However, network-related issues such as latency and bandwidth consumption need to be considered. Data transmission reduction is one of the solutions aiming to solve network-related problems by reducing the amount of data transmitted. In this paper, we propose a generalized formal data transmission reduction model independent of the system and the data type. This formalization is based on two main ideas: 1) not sending data until a significant change occurs, and 2) sending a lighter-weight entity permitting the cloud to deduce the data captured by the IoT device without actually receiving it. This paper includes the mathematical representation of the model and general evaluation metric formulas, as well as detailed projections on real-world use cases.}, } @article {pmid37007982, year = {2023}, author = {Zhong, L}, title = {A convolutional neural network based online teaching method using edge-cloud computing platform.}, journal = {Journal of cloud computing (Heidelberg, Germany)}, volume = {12}, number = {1}, pages = {49}, pmid = {37007982}, issn = {2192-113X}, abstract = {Teaching has become a complex and essential tool for developing students' abilities, given their different levels of learning and understanding. In traditional offline teaching methods, dance teachers lack targeted approaches to students' classroom teaching. Furthermore, teachers have limited time, so they cannot take full care of each student's learning needs according to their understanding and learning ability, which leads to polarization of the learning effect. For these reasons, this paper proposes an online teaching method based on artificial intelligence and edge computing. In the first phase, key frames are extracted from standard teaching videos and student-recorded dance learning videos using a deep convolutional neural network. In the second phase, human key points are extracted from the key frame images using grid coding, and a fully convolutional neural network is used to predict the human posture. A guidance vector is used to correct the dance movements to achieve the purpose of online learning. The CNN model is distributed into two parts so that training occurs in the cloud and prediction happens at the edge server. Moreover, a questionnaire was used to obtain the students' learning status, understand their difficulties in dance learning, and record the corresponding dance teaching videos to make up for their weak links. 
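The first data transmission reduction idea in the Elouali entry above (pmid 37007983), sending only on significant change, can be sketched in a few lines; the dead-band threshold and the uplink stub below are illustrative assumptions, not the paper's formal model.

```python
class SendOnChangeReducer:
    """Transmit a reading only when it deviates significantly from the last sent value."""
    def __init__(self, threshold, send_fn):
        self.threshold = threshold      # minimum change considered "significant"
        self.send_fn = send_fn          # uplink to the cloud (stubbed here)
        self.last_sent = None

    def observe(self, value):
        if self.last_sent is None or abs(value - self.last_sent) >= self.threshold:
            self.send_fn(value)
            self.last_sent = value      # cloud assumes the value holds until the next update
            return True                 # transmitted
        return False                    # suppressed

sent = []
reducer = SendOnChangeReducer(threshold=0.5, send_fn=sent.append)
readings = [20.0, 20.1, 20.2, 21.0, 21.1, 19.9]
flags = [reducer.observe(r) for r in readings]
print(sent)  # [20.0, 21.0, 19.9] -> 3 of 6 readings transmitted
```

The second idea, sending a lighter entity from which the cloud deduces the data, would replace send_fn with, for example, a model-parameter or delta encoding; the evaluation metrics in the paper then quantify the transmission savings.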
Finally, the edge-cloud computing platform is used to help the training model learn quickly from vast amounts of collected data. Our experiments show that the cloud-edge platform helps to support new teaching forms, enhances the platform's overall application performance and intelligence level, and improves the online learning experience. The proposed approach can help dance students achieve efficient learning.}, } @article {pmid37002322, year = {2023}, author = {Hassan, A and Elhoseny, M and Kayed, M}, title = {Hierarchical cloud architecture for identifying the bite of "Egyptian cobra" based on deep learning and quantum particle swarm optimization.}, journal = {Scientific reports}, volume = {13}, number = {1}, pages = {5250}, pmid = {37002322}, issn = {2045-2322}, abstract = {One of the most dangerous snake species is the "Egyptian cobra", which can kill a human in only 15 min. This paper uses deep learning techniques to accurately identify Egyptian cobra bites based on images of the bite marks. 
We built a dataset consisting of 500 images of cobra bite marks and 600 images of bite marks from other snake species found in Egypt. We utilize techniques such as multi-task learning, transfer learning, and data augmentation to boost the generalization and accuracy of our model, achieving an accuracy of 90.9%. To keep the availability and accuracy of our model as high as possible, we utilize cloud and edge computing techniques. Since the achieved accuracy of 90.9% is an efficient result but not 100%, it is normal for the system to sometimes produce wrong classifications. We therefore suggest retraining the model on its wrong predictions: the edge computing units, where the classification task is positioned, resend the wrong predictions to the cloud model, where the training process occurs, so the model can be retrained. This raises the accuracy to the best achievable level after a short period and increases the dataset size. We use the quantum particle swarm optimization technique to determine the optimal required number of edge nodes.}, } @article {pmid36998174, year = {2023}, author = {Ru, J and Khan Mirzaei, M and Xue, J and Peng, X and Deng, L}, title = {ViroProfiler: a containerized bioinformatics pipeline for viral metagenomic data analysis.}, journal = {Gut microbes}, volume = {15}, number = {1}, pages = {2192522}, pmid = {36998174}, issn = {1949-0984}, mesh = {Software ; *Gastrointestinal Microbiome ; Reproducibility of Results ; Metagenome ; *Microbiota ; Metagenomics/methods ; Computational Biology/methods ; Data Analysis ; }, abstract = {Bacteriophages play central roles in the maintenance and function of most ecosystems by regulating bacterial communities. Yet, our understanding of their diversity remains limited due to the lack of robust bioinformatics standards. Here we present ViroProfiler, an in-silico workflow for analyzing shotgun viral metagenomic data. ViroProfiler can be executed on a local Linux computer or in cloud computing environments. It uses the containerization technique to ensure computational reproducibility and facilitate collaborative research. ViroProfiler is freely available at https://github.com/deng-lab/viroprofiler.}, } @article {pmid36993557, year = {2023}, author = {Renton, AI and Dao, TT and Johnstone, T and Civier, O and Sullivan, RP and White, DJ and Lyons, P and Slade, BM and Abbott, DF and Amos, TJ and Bollmann, S and Botting, A and Campbell, MEJ and Chang, J and Close, TG and Eckstein, K and Egan, GF and Evas, S and Flandin, G and Garner, KG and Garrido, MI and Ghosh, SS and Grignard, M and Hannan, AJ and Huber, R and Kaczmarzyk, JR and Kasper, L and Kuhlmann, L and Lou, K and Mantilla-Ramos, YJ and Mattingley, JB and Morris, J and Narayanan, A and Pestilli, F and Puce, A and Ribeiro, FL and Rogasch, NC and Rorden, C and Schira, M and Shaw, TB and Sowman, PF and Spitz, G and Stewart, A and Ye, X and Zhu, JD and Hughes, ME and Narayanan, A and Bollmann, S}, title = {Neurodesk: An accessible, flexible, and portable data analysis environment for reproducible neuroimaging.}, journal = {Research square}, volume = {}, number = {}, pages = {}, pmid = {36993557}, issn = {2693-5015}, support = {P41 EB019936/EB/NIBIB NIH HHS/United States ; R01 EB030896/EB/NIBIB NIH HHS/United States ; }, abstract = {Neuroimaging data analysis often requires purpose-built software, which can be challenging to install and may produce different results across computing environments. 
Beyond being a roadblock to neuroscientists, these issues of accessibility and portability can hamper the reproducibility of neuroimaging data analysis pipelines. Here, we introduce the Neurodesk platform, which harnesses software containers to support a comprehensive and growing suite of neuroimaging software (https://www.neurodesk.org/). Neurodesk includes a browser-accessible virtual desktop environment and a command line interface, mediating access to containerized neuroimaging software libraries on various computing platforms, including personal and high-performance computers, cloud computing and Jupyter Notebooks. This community-oriented, open-source platform enables a paradigm shift for neuroimaging data analysis, allowing for accessible, flexible, fully reproducible, and portable data analysis pipelines.}, } @article {pmid36991964, year = {2023}, author = {Alharbi, HA and Aldossary, M and Almutairi, J and Elgendy, IA}, title = {Energy-Aware and Secure Task Offloading for Multi-Tier Edge-Cloud Computing Systems.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {6}, pages = {}, pmid = {36991964}, issn = {1424-8220}, abstract = {Nowadays, Unmanned Aerial Vehicle (UAV) devices and their services and applications are gaining popularity and attracting considerable attention in different fields of our daily life. Nevertheless, most of these applications and services require more powerful computational resources and energy, and the limited battery capacity and processing power of UAVs make it difficult to run them on a single device. Edge-Cloud Computing (ECC) is emerging as a new paradigm to cope with the challenges of these applications; it moves computing resources to the edge of the network and the remote cloud, thereby alleviating the overhead through task offloading. Even though ECC offers substantial benefits for these devices, the limited bandwidth available when several applications offload simultaneously via the same channel, with their ever-increasing data transmission, has not been adequately addressed. Moreover, protecting data during transmission remains a significant concern that still needs to be addressed. Therefore, in this paper, to bypass the limited bandwidth and address the potential security threats, a new compression-, security-, and energy-aware task offloading framework is proposed for the ECC system environment. Specifically, we first introduce an efficient compression layer to smartly reduce the data transmitted over the channel. In addition, to address the security issue, a new security layer based on the Advanced Encryption Standard (AES) cryptographic technique is presented to protect offloaded and sensitive data from different vulnerabilities. Subsequently, task offloading, data compression, and security are jointly formulated as a mixed-integer problem whose objective is to reduce the overall energy of the system under latency constraints. 
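As a rough illustration of the compression and security layers described in the Alharbi entry above (pmid 36991964), the sketch below compresses a payload with zlib and encrypts it with AES-GCM via the cryptography package; the key handling and payload are illustrative assumptions, not the paper's framework.

```python
import os
import zlib
from cryptography.hazmat.primitives.ciphers.aead import AESGCM

def prepare_offload(payload: bytes, key: bytes) -> tuple[bytes, bytes]:
    """Compress then encrypt a task payload before offloading to the edge/cloud."""
    compressed = zlib.compress(payload, 9)                     # compression layer
    nonce = os.urandom(12)                                     # 96-bit nonce, unique per message
    ciphertext = AESGCM(key).encrypt(nonce, compressed, None)  # security layer (AES-GCM)
    return nonce, ciphertext

def receive_offload(nonce: bytes, ciphertext: bytes, key: bytes) -> bytes:
    return zlib.decompress(AESGCM(key).decrypt(nonce, ciphertext, None))

key = AESGCM.generate_key(bit_length=256)
data = b"sensor frame " * 1000
nonce, ct = prepare_offload(data, key)
print(len(data), "->", len(ct), "bytes on the channel")
assert receive_offload(nonce, ct, key) == data
```

Compressing before encrypting matters: ciphertext is effectively incompressible, so reversing the order would forfeit the bandwidth savings the framework targets.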
Finally, simulation results reveal that our model is scalable and achieves a significant reduction in energy consumption (i.e., 19%, 18%, 21%, 14.5%, 13.1% and 12%) with respect to other benchmarks (i.e., local, edge, cloud and further benchmark models).}, } @article {pmid36991820, year = {2023}, author = {Morkevičius, N and Liutkevičius, A and Venčkauskas, A}, title = {Multi-Objective Path Optimization in Fog Architectures Using the Particle Swarm Optimization Approach.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {6}, pages = {}, pmid = {36991820}, issn = {1424-8220}, abstract = {IoT systems can successfully employ wireless sensor networks (WSNs) for data gathering and fog/edge computing for processing collected data and providing services. The proximity of edge devices to sensors improves latency, whereas cloud assets provide higher computational power when needed. Fog networks include various heterogeneous fog nodes and end-devices, some of which are mobile, such as vehicles, smartwatches, and cell phones, while others are static, such as traffic cameras. Therefore, some nodes in the fog network can be randomly organized, forming a self-organizing ad hoc structure. Moreover, fog nodes can have different resource constraints, such as energy, security, computational power, and latency. Therefore, two major problems arise in fog networks: ensuring optimal service (application) placement and determining the optimal path between the user end-device and the fog node that provides the services. Both problems require a simple and lightweight method that can rapidly identify a good solution using the constrained resources available in the fog nodes. In this paper, a novel two-stage multi-objective path optimization method is proposed that optimizes the data routing path between the end-device and fog node(s). A particle swarm optimization (PSO) method is used to determine the Pareto Frontier of alternative data paths, and then the analytical hierarchy process (AHP) is used to choose the best path alternative according to the application-specific preference matrix. The results show that the proposed method works with a wide range of objective functions that can be easily expanded. Moreover, the proposed method provides a whole set of alternative solutions and evaluates each of them, allowing us to choose the second- or third-best alternative if the first one is not suitable for some reason.}, } @article {pmid36991748, year = {2023}, author = {Cadenas, JM and Garrido, MC and Martínez-España, R}, title = {A Methodology Based on Machine Learning and Soft Computing to Design More Sustainable Agriculture Systems.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {6}, pages = {}, pmid = {36991748}, issn = {1424-8220}, support = {2020-112675RB-C44//MCIN/AEI/ 10.13039/501100011033/ ; }, abstract = {Advances in new technologies are allowing virtually every field of real life to benefit from them. Among them, we can highlight the IoT ecosystem, which makes available large amounts of information; cloud computing, which provides large computational capacities; and Machine Learning techniques, together with the Soft Computing framework, which incorporate intelligence. These constitute a powerful set of tools that allow us to define Decision Support Systems that improve decisions in a wide range of real-life problems. In this paper, we focus on the agricultural sector and the issue of sustainability. 
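To illustrate the particle swarm stage of the path optimization method in the Morkevičius entry above (pmid 36991820), here is a minimal generic PSO minimizing a scalarized multi-objective cost; the objective, weights, and hyperparameters are illustrative assumptions, and the actual method computes a Pareto Frontier followed by AHP ranking rather than a single weighted sum.

```python
import numpy as np

def pso_minimize(cost, dim, n_particles=30, iters=100, bounds=(-5.0, 5.0),
                 w=0.7, c1=1.5, c2=1.5, seed=0):
    """Plain particle swarm optimization over a continuous search space."""
    rng = np.random.default_rng(seed)
    lo, hi = bounds
    x = rng.uniform(lo, hi, (n_particles, dim))        # positions
    v = np.zeros_like(x)                               # velocities
    pbest = x.copy()                                   # per-particle bests
    pbest_cost = np.apply_along_axis(cost, 1, x)
    g = pbest[np.argmin(pbest_cost)]                   # global best
    for _ in range(iters):
        r1, r2 = rng.random((2, n_particles, dim))
        v = w * v + c1 * r1 * (pbest - x) + c2 * r2 * (g - x)
        x = np.clip(x + v, lo, hi)
        costs = np.apply_along_axis(cost, 1, x)
        improved = costs < pbest_cost
        pbest[improved], pbest_cost[improved] = x[improved], costs[improved]
        g = pbest[np.argmin(pbest_cost)]
    return g, pbest_cost.min()

# Example: weighted sum of two competing "path" objectives (latency vs. energy).
def path_cost(z):
    latency, energy = np.sum(z**2), np.sum((z - 1.0)**2)
    return 0.6 * latency + 0.4 * energy

best, best_cost = pso_minimize(path_cost, dim=4)
print(best, best_cost)
```

A Pareto-based variant would keep an archive of non-dominated particles instead of a single global best; AHP then ranks the archived paths by the preference matrix.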
We propose a methodology in which, starting from time series data provided by the IoT ecosystem, the data are preprocessed and modelled using machine learning techniques within the framework of Soft Computing. The obtained model is able to carry out inferences over a given prediction horizon, allowing the development of Decision Support Systems that can help the farmer. By way of illustration, the proposed methodology is applied to the specific problem of early frost prediction, and its benefits are illustrated with specific scenarios validated by expert farmers in an agricultural cooperative. The evaluation and validation show the effectiveness of the proposal.}, } @article {pmid36991663, year = {2023}, author = {Al-Jumaili, AHA and Muniyandi, RC and Hasan, MK and Paw, JKS and Singh, MJ}, title = {Big Data Analytics Using Cloud Computing Based Frameworks for Power Management Systems: Status, Constraints, and Future Recommendations.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {6}, pages = {}, pmid = {36991663}, issn = {1424-8220}, support = {FRGS/1/2021/ICT07/UKM/02/1//National University of Malaysia/ ; }, abstract = {Traditional parallel computing for power management systems faces major challenges in execution time, computational complexity, and efficiency, such as processing delays in power system condition monitoring, particularly when detecting and predicting patterns in consumer power consumption, weather data, and power generation through centralized parallel processing and diagnosis. Due to these constraints, data management has become a critical research consideration and bottleneck. To cope with these constraints, cloud computing-based methodologies have been introduced for managing data efficiently in power management systems. This paper reviews the concept of cloud computing architecture that can meet multi-level real-time requirements to improve monitoring and performance, designed for different application scenarios of power system monitoring. Then, cloud computing solutions are discussed against the background of big data, and emerging parallel programming models such as Hadoop, Spark, and Storm are briefly described to analyze advancements, constraints, and innovations. The key performance metrics of cloud computing applications, such as core data sampling, modeling, and analyzing the competitiveness of big data, were modeled by applying related hypotheses. Finally, the paper introduces a new design concept with cloud computing and offers recommendations focusing on cloud computing infrastructure and methods for managing real-time big data in power management systems that solve the data-mining challenges.}, } @article {pmid36991661, year = {2023}, author = {Gabriele, M and Brumana, R}, title = {Monitoring Land Degradation Dynamics to Support Landscape Restoration Actions in Remote Areas of the Mediterranean Basin (Murcia Region, Spain).}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {6}, pages = {}, pmid = {36991661}, issn = {1424-8220}, abstract = {This study aims to develop a workflow methodology for collecting substantial amounts of Earth Observation data to investigate the effectiveness of landscape restoration actions and support the implementation of the Above Ground Carbon Capture indicator of the Ecosystem Restoration Camps (ERC) Soil Framework. 
To achieve this objective, the study utilizes the Google Earth Engine API within R (rGEE) to monitor the Normalized Difference Vegetation Index (NDVI). The results of this study provide a common scalable reference for ERC camps globally, with a specific focus on Camp Altiplano, the first European ERC camp, located in Murcia, Southern Spain. The coding workflow has effectively acquired almost 12 TB of data for analyzing MODIS/006/MOD13Q1 NDVI over a 20-year span. Additionally, the average retrieval of image collections has yielded 120 GB of data for the COPERNICUS/S2_SR 2017 vegetation growing season and 350 GB of data for the COPERNICUS/S2_SR 2022 vegetation winter season. Based on these results, it is reasonable to assert that cloud computing platforms like GEE will enable the monitoring and documentation of regenerative techniques at unprecedented levels. The findings will be shared on a predictive platform called Restor, which will contribute to the development of a global ecosystem restoration model.}, } @article {pmid36991641, year = {2023}, author = {S-Julián, R and Lacalle, I and Vaño, R and Boronat, F and Palau, CE}, title = {Self-* Capabilities of Cloud-Edge Nodes: A Research Review.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {6}, pages = {}, pmid = {36991641}, issn = {1424-8220}, support = {101069732//European Commission/ ; }, abstract = {Most recent edge and fog computing architectures aim at pushing cloud-native traits to the edge of the network, reducing latency, power consumption, and network overhead, and allowing operations to be performed close to data sources. To manage these architectures in an autonomous way, the systems materialized in specific computing nodes must deploy self-* capabilities that minimize human intervention across the continuum of computing equipment. Nowadays, a systematic classification of such capabilities is missing, as well as an analysis of how they can be implemented. For a system owner in a continuum deployment, there is no main reference publication to consult to determine which capabilities exist and which sources to rely on. In this article, a literature review is conducted to analyze the self-* capabilities needed by truly autonomous systems. The article aims to shed light on a potential unifying taxonomy in this heterogeneous field. 
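The NDVI monitoring workflow in the Gabriele entry above (pmid 36991661) uses rGEE; an equivalent minimal sketch with the official earthengine-api Python client follows. The region coordinates are illustrative assumptions, and ee.Initialize() presupposes an authenticated Earth Engine account.

```python
import ee

ee.Initialize()  # requires prior `earthengine authenticate`

# Illustrative bounding box standing in for a restoration site in Murcia, Spain.
region = ee.Geometry.Rectangle([-2.5, 37.8, -2.3, 38.0])

# 20 years of MODIS 16-day NDVI composites, as named in the entry above.
ndvi = (ee.ImageCollection("MODIS/006/MOD13Q1")
        .filterDate("2002-01-01", "2022-01-01")
        .select("NDVI"))

# Mean NDVI over the region per composite, scaled by the MODIS 0.0001 factor.
def region_mean(img):
    stats = img.reduceRegion(ee.Reducer.mean(), region, scale=250)
    return ee.Feature(None, {
        "date": img.date().format("YYYY-MM-dd"),
        "ndvi": ee.Number(stats.get("NDVI")).multiply(0.0001),
    })

series = ee.FeatureCollection(ndvi.map(region_mean))
print(series.limit(5).getInfo())  # first few (date, ndvi) records
```

Because the reduction runs server-side, only the small (date, ndvi) table leaves the cloud, which is what makes multi-terabyte archives tractable from a laptop.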
In addition, the results include conclusions on why these aspects are tackled so heterogeneously and depend strongly on specific cases, and on why there is no clear reference architecture to guide which traits the nodes should be equipped with.}, } @article {pmid36991190, year = {2023}, author = {Rao, M and Tang, H and Wu, J and Song, W and Zhang, M and Yin, W and Zhuo, Y and Kiani, F and Chen, B and Jiang, X and Liu, H and Chen, HY and Midya, R and Ye, F and Jiang, H and Wang, Z and Wu, M and Hu, M and Wang, H and Xia, Q and Ge, N and Li, J and Yang, JJ}, title = {Thousands of conductance levels in memristors integrated on CMOS.}, journal = {Nature}, volume = {615}, number = {7954}, pages = {823-829}, pmid = {36991190}, issn = {1476-4687}, support = {FA9550-19-1-0213//US Airforce Research Laboratory/ ; W911NF2120128//Army Research Office/ ; CMMI-2240407//National Science Foundation/ ; CMMI-1922206//National Science Foundation/ ; }, abstract = {Neural networks based on memristive devices[1-3] have the ability to improve throughput and energy efficiency for machine learning[4,5] and artificial intelligence[6], especially in edge applications[7-21]. Because training a neural network model from scratch is costly in terms of hardware resources, time and energy, it is impractical to do it individually on billions of memristive neural networks distributed at the edge. A practical approach would be to download the synaptic weights obtained from cloud training and program them directly into memristors for the commercialization of edge applications. Some post-tuning in memristor conductance could be done afterwards or during applications to adapt to specific situations. Therefore, in neural network applications, memristors require high-precision programmability to guarantee uniform and accurate performance across a large number of memristive networks[22-28]. This requires many distinguishable conductance levels on each memristive device, not only in laboratory-made devices but also in devices fabricated in factories. Analog memristors with many conductance states also benefit other applications, such as neural network training, scientific computing and even 'mortal computing'[25,29,30]. Here we report 2,048 conductance levels achieved with memristors in fully integrated chips with 256 × 256 memristor arrays monolithically integrated on complementary metal-oxide-semiconductor (CMOS) circuits in a commercial foundry. We have identified the underlying physics that previously limited the number of conductance levels that could be achieved in memristors and developed electrical operation protocols to avoid such limitations. These results provide insights into the fundamental understanding of the microscopic picture of memristive switching as well as approaches to enable high-precision memristors for various applications. [Fig. 1 caption, condensed: High-precision memristor for neuromorphic computing. (a) Scheme for large-scale edge deployment of cloud-trained weights into memristor arrays; (b) eight-inch wafer fabricated by a commercial semiconductor manufacturer; (c,d) transmission electron microscopy cross-sections of the memristor stack, with Pt and Ta as bottom and top electrodes; (e,f) denoising eliminates large-amplitude RTN so that neighbouring conductance states are clearly distinguishable; (g) an individual memristor tuned into 2,048 resistance levels, and an entire 256 × 256 array programmed into 64 conductance levels, with each device previously switched over one million cycles.]}, } @article {pmid36990988, year = {2023}, author = {Kusunose, M and Muto, K}, title = {Public attitudes toward cloud computing and willingness to share personal health records (PHRs) and genome data for health care research in Japan.}, journal = {Human genome variation}, volume = {10}, number = {1}, pages = {11}, pmid = {36990988}, issn = {2054-345X}, support = {JP19km0405501//Japan Agency for Medical Research and Development (AMED)/ ; JP22ama221001//Japan Agency for Medical Research and Development (AMED)/ ; JP19km0405501//Japan Agency for Medical Research and Development (AMED)/ ; JP22ama221001//Japan Agency for Medical Research and Development (AMED)/ ; JP80745985//MEXT | Japan Society for the Promotion of Science (JSPS)/ ; JP80745985//MEXT | Japan Society for the Promotion of Science (JSPS)/ ; }, abstract = {Japan's government aims to promote the linkage of medical records, including medical genomic testing data and personal health records (PHRs), via cloud computing (the cloud). However, linking national medical records and using them for health care research can be controversial. Additionally, many ethical issues with using cloud networks for health care and genome data have been noted. However, no research has yet explored the Japanese public's opinions about sharing their PHRs, including genome data, for health care research, or about the use of the cloud for storing and analyzing such data. Therefore, we conducted a survey in March 2021 to clarify the public's attitudes toward sharing their PHRs, including genome data, and toward using the cloud for health care research. We analyzed the data to experimentally create digital health basic literacy scores (BLSs). Our results showed that the Japanese public had concerns about data sharing that overlapped with structural cloud computing issues. The effect of incentives on changes in participants' willingness to share data (WTSD) was limited. Instead, there could be a correlation between WTSD and BLSs. 
Finally, we argue that it is vital to consider not only researchers but also research participants as value cocreators in health care research conducted through the cloud to overcome both parties' vulnerability.}, } @article {pmid36977690, year = {2023}, author = {Rogers, DM and Agarwal, R and Vermaas, JV and Smith, MD and Rajeshwar, RT and Cooper, C and Sedova, A and Boehm, S and Baker, M and Glaser, J and Smith, JC}, title = {SARS-CoV2 billion-compound docking.}, journal = {Scientific data}, volume = {10}, number = {1}, pages = {173}, pmid = {36977690}, issn = {2052-4463}, support = {DE-AC05-00OR22725//DOE | Office of Science (SC)/ ; DE-AC05-00OR22725//DOE | Office of Science (SC)/ ; DE-AC05-00OR22725//DOE | Office of Science (SC)/ ; DE-AC05-00OR22725//DOE | Office of Science (SC)/ ; DE-AC05-00OR22725//DOE | Office of Science (SC)/ ; DE-AC05-00OR22725//DOE | Office of Science (SC)/ ; DE-AC05-00OR22725//DOE | Office of Science (SC)/ ; DE-AC05-00OR22725//DOE | Office of Science (SC)/ ; }, mesh = {Humans ; *COVID-19 ; *Ligands ; *SARS-CoV-2 ; Molecular Docking Simulation ; }, abstract = {This dataset contains ligand conformations and docking scores for 1.4 billion molecules docked against 6 structural targets from SARS-CoV2, representing 5 unique proteins: MPro, NSP15, PLPro, RDRP, and the Spike protein. Docking was carried out using the AutoDock-GPU platform on the Summit supercomputer and Google Cloud. The docking procedure employed the Solis Wets search method to generate 20 independent ligand binding poses per compound. Each compound geometry was scored using the AutoDock free energy estimate, and rescored using RFScore v3 and DUD-E machine-learned rescoring models. Input protein structures are included, suitable for use by AutoDock-GPU and other docking programs. As the result of an exceptionally large docking campaign, this dataset represents a valuable resource for discovering trends across small molecule and protein binding sites, training AI models, and comparing to inhibitor compounds targeting SARS-CoV-2. The work also gives an example of how to organize and process data from ultra-large docking screens.}, } @article {pmid36970305, year = {2022}, author = {Christensen, JR and Golden, HE and Alexander, LC and Pickard, BR and Fritz, KM and Lane, CR and Weber, MH and Kwok, RM and Keefer, MN}, title = {Headwater streams and inland wetlands: Status and advancements of geospatial datasets and maps across the United States.}, journal = {Earth-science reviews}, volume = {235}, number = {}, pages = {1-24}, pmid = {36970305}, issn = {0012-8252}, support = {EPA999999/ImEPA/Intramural EPA/United States ; }, abstract = {Headwater streams and inland wetlands provide essential functions that support healthy watersheds and downstream waters. However, scientists and aquatic resource managers lack a comprehensive synthesis of national and state stream and wetland geospatial datasets and emerging technologies that can further improve these data. We conducted a review of existing United States (US) federal and state stream and wetland geospatial datasets, focusing on their spatial extent, permanence classifications, and current limitations. We also examined recent peer-reviewed literature for emerging methods that can potentially improve the estimation, representation, and integration of stream and wetland datasets. We found that federal and state datasets rely heavily on the US Geological Survey's National Hydrography Dataset for stream extent and duration information. 
Only eleven states (22%) had additional stream extent information, and seven states (14%) provided additional duration information. Likewise, federal and state wetland datasets primarily use the US Fish and Wildlife Service's National Wetlands Inventory (NWI) Geospatial Dataset, with only two states using non-NWI datasets. Our synthesis revealed that LiDAR-based technologies hold promise for advancing stream and wetland mapping at limited spatial extents. While machine learning techniques may help to scale up these LiDAR-derived estimates, challenges related to preprocessing and data workflows remain. High-resolution commercial imagery, supported by public imagery and cloud computing, may further aid characterization of the spatial and temporal dynamics of streams and wetlands, especially using multi-platform and multi-temporal machine learning approaches. Models integrating both stream and wetland dynamics are limited, and field-based efforts must remain a key component in developing improved headwater stream and wetland datasets. Continued financial and partnership support of existing databases is also needed to enhance mapping and inform water resources research and policy decisions.}, } @article {pmid36969371, year = {2023}, author = {Islam, MJ and Datta, R and Iqbal, A}, title = {Actual rating calculation of the zoom cloud meetings app using user reviews on google play store with sentiment annotation of BERT and hybridization of RNN and LSTM.}, journal = {Expert systems with applications}, volume = {223}, number = {}, pages = {119919}, pmid = {36969371}, issn = {0957-4174}, abstract = {The recent outbreak of COVID-19 forced people to work from home, and educational institutes ran their academic activities online. The online meeting app "Zoom Cloud Meetings" provided much of the support needed for this purpose. To provide the functionality required for this level of online support, developers need to release new versions of the application frequently, which increases the chance of introducing bugs in new releases. To fix these bugs, developers need user feedback on each new release. Most of the time, however, ratings and reviews contradict each other because users are careless in giving ratings, and this has been the main obstacle for software developers trying to fix bugs based on user ratings. For this reason, we conduct an average rating calculation based on the sentiment of user reviews to help software developers. We use BERT-based sentiment annotation to create unbiased datasets and hybridize RNN with LSTM to compute ratings from the unbiased review dataset. Out of four models trained on four different datasets, we found promising performance in the two datasets containing a sufficiently large number of unbiased reviews. The results show that the reviews carry more positive sentiment than the actual ratings: our model yields an average rating of 3.60 stars, whereas the actual average rating in the dataset is 3.08 stars. We use reviews of more than 250 apps from the Google Play app store. 
Our results could be even more promising given a large dataset containing only reviews of the Zoom Cloud Meetings app.}, } @article {pmid36967390, year = {2023}, author = {Camacho, C and Boratyn, GM and Joukov, V and Vera Alvarez, R and Madden, TL}, title = {ElasticBLAST: accelerating sequence search via cloud computing.}, journal = {BMC bioinformatics}, volume = {24}, number = {1}, pages = {117}, pmid = {36967390}, issn = {1471-2105}, mesh = {*Cloud Computing ; *Software ; Computational Biology/methods ; Databases, Factual ; Costs and Cost Analysis ; }, abstract = {BACKGROUND: Biomedical researchers use alignments produced by BLAST (Basic Local Alignment Search Tool) to categorize their query sequences. Producing such alignments is an essential bioinformatics task that is well suited for the cloud. The cloud can perform many calculations quickly as well as store and access large volumes of data. Bioinformaticians can also use it to collaborate with other researchers, sharing their results, datasets and even their pipelines on a common platform.

RESULTS: We present ElasticBLAST, a cloud native application to perform BLAST alignments in the cloud. ElasticBLAST can handle anywhere from a few to many thousands of queries and run the searches on thousands of virtual CPUs (if desired), deleting resources when it is done. It uses cloud native tools for orchestration and can request discounted instances, lowering cloud costs for users. It is supported on Amazon Web Services and Google Cloud Platform. It can search BLAST databases that are user provided or from the National Center for Biotechnology Information.
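To give a feel for the workflow just described, here is a sketch of driving the ElasticBLAST command-line interface from Python; the bucket name and query file are placeholders, and the exact subcommands and flags should be checked against the current ElasticBLAST documentation, as they may differ by release.

```python
import subprocess

RESULTS = "s3://my-bucket/elastic-blast-results"  # placeholder bucket (assumption)

def run(cmd):
    print("+", " ".join(cmd))
    subprocess.run(cmd, check=True)

# Submit a search against an NCBI-provided database; the submit/status/delete
# workflow follows the tool's documentation but is not guaranteed verbatim here.
run(["elastic-blast", "submit",
     "--program", "blastp",
     "--db", "swissprot",
     "--query", "queries.fa",       # placeholder query file (assumption)
     "--results", RESULTS,
     "--num-nodes", "2"])

run(["elastic-blast", "status", "--results", RESULTS])  # poll until searches finish
run(["elastic-blast", "delete", "--results", RESULTS])  # tear down cloud resources
```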

CONCLUSION: We show that ElasticBLAST is a useful application that can efficiently perform BLAST searches for the user in the cloud, which we demonstrate with two examples. At the same time, it hides much of the complexity of working in the cloud, lowering the threshold for moving work to the cloud.}, } @article {pmid36961920, year = {2023}, author = {Monteiro, MG and Pantani, D and Pinsky, I and Hernandes Rocha, TA}, title = {Using the Pan American Health Organization Digital Conversational Agent to Educate the Public on Alcohol Use and Health: Preliminary Analysis.}, journal = {JMIR formative research}, volume = {7}, number = {}, pages = {e43165}, pmid = {36961920}, issn = {2561-326X}, abstract = {BACKGROUND: There is widespread misinformation about the effects of alcohol consumption on health, which was amplified during the COVID-19 pandemic through social media and internet channels. Chatbots and conversational agents became an important part of the World Health Organization (WHO) response during the COVID-19 pandemic, quickly disseminating evidence-based information related to COVID-19 and tobacco to the public. The Pan American Health Organization (PAHO) seized the opportunity to develop a conversational agent to talk about alcohol-related topics and thereby complement traditional forms of health education that have been promoted in the past.

OBJECTIVE: This study aimed to develop and deploy a digital conversational agent that interacts with an unlimited number of users anonymously, 24 hours a day, about alcohol-related topics, including ways to reduce risks from drinking, and that is accessible in several languages, at no cost, and through various devices.

METHODS: The content development was based on the latest scientific evidence on the impacts of alcohol on health, social norms about drinking, and data from the WHO and PAHO. The agent itself was developed through a nonexclusive license agreement with a private company (Soul Machines) and included Google Dialogflow ES as the natural language processing software and Amazon Web Services for cloud services. Another company was contracted to program all the conversations, following the technical advice of PAHO staff.

RESULTS: The conversational agent was named Pahola, and it was deployed on November 19, 2021, through the PAHO website after a launch event with high publicity. No identifiable data were used and all interactions were anonymous, and therefore, this was not considered research with human subjects. Pahola speaks in English, Spanish, and Portuguese and interacts anonymously with a potentially infinite number of users through various digital devices. Users were required to accept the terms and conditions to enable access to their camera and microphone to interact with Pahola. Pahola attracted good attention from the media and reached 1.6 million people, leading to 236,000 clicks on its landing page, mostly through mobile devices. Only 1532 users had a conversation after clicking to talk to Pahola. The average time users spent talking to Pahola was 5 minutes. Major dropouts were observed in different steps of the conversation flow. Some questions asked by users were not anticipated during programming and could not be answered.

CONCLUSIONS: Our findings showed several limitations to using a conversational agent for alcohol education aimed at the general public. Improvements are needed to expand the content and make it more meaningful and engaging. The potential of chatbots to educate the public on alcohol-related topics seems enormous but requires a long-term investment of resources and research to be useful and reach many more people.}, } @article {pmid36958108, year = {2023}, author = {Menghani, RR and Das, A and Kraft, RH}, title = {A sensor-enabled cloud-based computing platform for computational brain biomechanics.}, journal = {Computer methods and programs in biomedicine}, volume = {233}, number = {}, pages = {107470}, doi = {10.1016/j.cmpb.2023.107470}, pmid = {36958108}, issn = {1872-7565}, mesh = {*Cloud Computing ; Biomechanical Phenomena ; *Head ; Brain/physiology ; Software ; }, abstract = {BACKGROUND AND OBJECTIVES: Driven by the risk of repetitive head trauma, sensors have been integrated into mouthguards to measure head impacts in contact sports and military activities. These wearable devices, referred to as "instrumented" or "smart" mouthguards, are being actively developed by various research groups and organizations. These instrumented mouthguards provide an opportunity to further study and understand brain biomechanics due to impact. In this study, we present a brain modeling service that can use information from these sensors to predict brain injury metrics in an automated fashion.

METHODS: We have built a brain modeling platform using several Amazon Web Services (AWS) offerings to enable cloud computing and scalability. We use a custom-built cloud-based finite element modeling code to compute the physics-based nonlinear response of the intracranial brain tissue, and we provide a frontend web application and an application programming interface for groups working on head impact sensor technology to incorporate simulated injury predictions into their research pipelines.

RESULTS: The platform results have been validated against experimental data available in the literature for brain-skull relative displacements, brain strains and intracranial pressure. The parallel processing capability of the platform has also been tested and verified. We also studied the accuracy of the custom head surfaces generated by Avatar 3D.

CONCLUSION: We present a validated cloud-based computational brain modeling platform that uses sensor data as input for numerical brain models and outputs a quantitative description of brain tissue strains and injury metrics. The platform is expected to generate transparent, reproducible, and traceable brain computing results.}, } @article {pmid36950362, year = {2023}, author = {Gonzalez, EM and Zarei, A and Hendler, N and Simmons, T and Zarei, A and Demieville, J and Strand, R and Rozzi, B and Calleja, S and Ellingson, H and Cosi, M and Davey, S and Lavelle, DO and Truco, MJ and Swetnam, TL and Merchant, N and Michelmore, RW and Lyons, E and Pauli, D}, title = {PhytoOracle: Scalable, modular phenomics data processing pipelines.}, journal = {Frontiers in plant science}, volume = {14}, number = {}, pages = {1112973}, pmid = {36950362}, issn = {1664-462X}, abstract = {As phenomics data volume and dimensionality increase due to advancements in sensor technology, there is an urgent need to develop and implement scalable data processing pipelines. Current phenomics data processing pipelines lack modularity, extensibility, and processing distribution across sensor modalities and phenotyping platforms. To address these challenges, we developed PhytoOracle (PO), a suite of modular, scalable pipelines for processing large volumes of field phenomics RGB, thermal, PSII chlorophyll fluorescence 2D images, and 3D point clouds. PhytoOracle aims to (i) improve data processing efficiency; (ii) provide an extensible, reproducible computing framework; and (iii) enable data fusion of multi-modal phenomics data. PhytoOracle integrates open-source distributed computing frameworks for parallel processing on high-performance computing, cloud, and local computing environments. Each pipeline component is available as a standalone container, providing transferability, extensibility, and reproducibility. The PO pipeline extracts and associates individual plant traits across sensor modalities and collection time points, representing a unique multi-system approach to addressing the genotype-phenotype gap. To date, PO supports lettuce and sorghum phenotypic trait extraction, with a goal of widening the range of supported species in the future. At the maximum number of cores tested in this study (1,024 cores), PO processing times were: 235 minutes for 9,270 RGB images (140.7 GB), 235 minutes for 9,270 thermal images (5.4 GB), and 13 minutes for 39,678 PSII images (86.2 GB). These processing times represent end-to-end processing, from raw data to fully processed numerical phenotypic trait data. Repeatability values of 0.39-0.95 (bounding area), 0.81-0.95 (axis-aligned bounding volume), 0.79-0.94 (oriented bounding volume), 0.83-0.95 (plant height), and 0.81-0.95 (number of points) were observed in Field Scanalyzer data. 
We also show the ability of PO to process drone data with a repeatability of 0.55-0.95 (bounding area).}, } @article {pmid36949901, year = {2023}, author = {Cossío, F and Schurz, H and Engström, M and Barck-Holst, C and Tsirikoglou, A and Lundström, C and Gustafsson, H and Smith, K and Zackrisson, S and Strand, F}, title = {VAI-B: a multicenter platform for the external validation of artificial intelligence algorithms in breast imaging.}, journal = {Journal of medical imaging (Bellingham, Wash.)}, volume = {10}, number = {6}, pages = {061404}, pmid = {36949901}, issn = {2329-4302}, abstract = {PURPOSE: Multiple vendors are currently offering artificial intelligence (AI) computer-aided systems for triage detection, diagnosis, and risk prediction of breast cancer based on screening mammography. There is an imminent need to establish validation platforms that enable fair and transparent testing of these systems against external data.

APPROACH: We developed Validation of Artificial Intelligence for Breast imaging (VAI-B), a platform for independent validation of AI algorithms in breast imaging. The platform is a hybrid solution, with one part implemented in the cloud and another in an on-premises environment at Karolinska Institute. Cloud services provide the flexibility of scaling the computing power during inference time, while secure on-premises clinical data storage preserves patient privacy. A MongoDB database and a Python package were developed to store and manage the data on-premises. VAI-B requires four data components: radiological images, AI inferences, radiologist assessments, and cancer outcomes.
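As a minimal illustration of the on-premises data layer just described (a MongoDB database managed from Python), the sketch below stores and queries AI inference records with pymongo; the database name, collection schema, and field names are illustrative assumptions, not the actual VAI-B schema.

```python
from pymongo import MongoClient

client = MongoClient("mongodb://localhost:27017")  # on-premises server (assumption)
db = client["vai_b_demo"]                          # hypothetical database name
inferences = db["ai_inferences"]

# One record linking an examination to a vendor's abnormality score.
inferences.insert_one({
    "exam_id": "EX-000123",        # placeholder identifier
    "vendor": "vendor_a",
    "abnormality_score": 0.87,
    "model_version": "1.4.2",
})

# Index the common lookup path, then fetch all vendor scores for one exam.
inferences.create_index([("exam_id", 1), ("vendor", 1)])
for rec in inferences.find({"exam_id": "EX-000123"}):
    print(rec["vendor"], rec["abnormality_score"])
```

Keeping inferences in a document store makes it easy to attach per-vendor fields without schema migrations, which suits a platform that must onboard new AI systems over time.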

RESULTS: To pilot test VAI-B, we defined a case-control population of 8080 patients diagnosed with breast cancer and 36,339 healthy women, drawn from the Swedish national quality registry for breast cancer. Images and radiological assessments from more than 100,000 mammography examinations were extracted from hospitals in three regions of Sweden. The images were processed by AI systems from three vendors in a virtual private cloud to produce abnormality scores related to signs of cancer in the images. A total of 105,706 examinations have been processed and stored in the database.

CONCLUSIONS: We have created a platform that will allow downstream evaluation of AI systems for breast cancer detection, which enables faster development cycles for participating vendors and safer AI adoption for participating hospitals. The platform was designed to be scalable and ready to be expanded should a new vendor want to evaluate their system or should a new hospital wish to obtain an evaluation of different AI systems on their images.}, } @article {pmid36947346, year = {2023}, author = {Abler, D and Schaer, R and Oreiller, V and Verma, H and Reichenbach, J and Aidonopoulos, O and Evéquoz, F and Jreige, M and Prior, JO and Depeursinge, A}, title = {QuantImage v2: a comprehensive and integrated physician-centered cloud platform for radiomics and machine learning research.}, journal = {European radiology experimental}, volume = {7}, number = {1}, pages = {16}, pmid = {36947346}, issn = {2509-9280}, support = {205320/179069//Schweizerischer Nationalfonds zur Förderung der Wissenschaftlichen Forschung/ ; IMAGINE//Swiss Personalized Health Network (SPHN)/ ; MSXplain//Hasler Stiftung/ ; EPICS//Hasler Stiftung/ ; QA4IQI//Swiss Personalized Health Network (SPHN)/ ; }, mesh = {*Radiology/instrumentation/methods ; *Computational Biology ; *Cloud Computing ; Research ; Software ; Models, Theoretical ; Forecasting ; Carcinoma/diagnostic imaging ; Lung Neoplasms/diagnostic imaging ; Humans ; Machine Learning ; }, abstract = {BACKGROUND: Radiomics, the field of image-based computational medical biomarker research, has experienced rapid growth over the past decade due to its potential to revolutionize the development of personalized decision support models. However, despite its research momentum and important advances toward methodological standardization, the translation of radiomics prediction models into clinical practice progresses only slowly. The lack of physicians leading the development of radiomics models and the insufficient integration of radiomics tools into the clinical workflow contribute to this slow uptake.

METHODS: We propose a physician-centered vision of radiomics research and derive minimal functional requirements for radiomics research software to support this vision. Free-to-access radiomics tools and frameworks were reviewed to identify best practices and reveal the shortcomings of existing software solutions to optimally support physician-driven radiomics research in a clinical environment.

RESULTS: Support for user-friendly development and evaluation of radiomics prediction models via machine learning was found to be missing in most tools. QuantImage v2 (QI2) was designed and implemented to address these shortcomings. QI2 relies on well-established existing tools and open-source libraries to realize and concretely demonstrate the potential of a one-stop tool for physician-driven radiomics research. It provides web-based access to cohort management, feature extraction, and visualization and supports "no-code" development and evaluation of machine learning models against patient-specific outcome data.
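
Behind the UI of any such "no-code" tool, model development and evaluation against outcome data reduces to a standard cross-validated workflow. A hedged sketch with scikit-learn and synthetic stand-in data (this is not QI2's actual pipeline):

```python
# Illustrative sketch of the kind of model evaluation a "no-code"
# radiomics tool automates: cross-validated classification of extracted
# features against patient outcomes. Synthetic data stands in for a
# real radiomics feature matrix.
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_val_score
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler

rng = np.random.default_rng(0)
X = rng.normal(size=(120, 50))        # 120 patients x 50 radiomics features
y = rng.integers(0, 2, size=120)      # binary outcome labels

model = make_pipeline(StandardScaler(), LogisticRegression(max_iter=1000))
aucs = cross_val_score(model, X, y, cv=5, scoring="roc_auc")
print(f"mean ROC AUC: {aucs.mean():.2f} +/- {aucs.std():.2f}")
```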

CONCLUSIONS: QI2 fills a gap in the radiomics software landscape by enabling "no-code" radiomics research, including model validation, in a clinical environment. Further information about QI2, a public instance of the system, and its source code is available at https://medgift.github.io/quantimage-v2-info/. Key points: As domain experts, physicians play a key role in the development of radiomics models. Existing software solutions do not support physician-driven research optimally. QuantImage v2 implements a physician-centered vision for radiomics research. QuantImage v2 is a web-based, "no-code" radiomics research platform.}, } @article {pmid38665413, year = {2023}, author = {Khazali, M and Lechner, W}, title = {Scalable quantum processors empowered by the Fermi scattering of Rydberg electrons.}, journal = {Communications physics}, volume = {6}, number = {1}, pages = {57}, pmid = {38665413}, issn = {2399-3650}, abstract = {Quantum computing promises exponential speed-up compared to its classical counterpart. While the neutral atom processors are the pioneering platform in terms of scalability, the dipolar Rydberg gates impose the main bottlenecks on the scaling of these devices. This article presents an alternative scheme for neutral atom quantum processing, based on the Fermi scattering of a Rydberg electron from ground-state atoms in spin-dependent lattice geometries. Instead of relying on Rydberg pair-potentials, the interaction is controlled by engineering the electron cloud of a sole Rydberg atom. The present scheme addresses the scaling obstacles in Rydberg processors by exponentially suppressing the population of short-lived states and by operating in ultra-dense atomic lattices. The restoring forces in the molecule-type Rydberg-Fermi potential preserve the trapping over a long interaction period. Furthermore, the proposed scheme mitigates different competing infidelity criteria, eliminates unwanted cross-talks, and significantly suppresses the operation depth in running complicated quantum algorithms.}, } @article {pmid37954189, year = {2023}, author = {Possik, J and Asgary, A and Solis, AO and Zacharewicz, G and Shafiee, MA and Najafabadi, MM and Nadri, N and Guimaraes, A and Iranfar, H and Ma, P and Lee, CM and Tofighi, M and Aarabi, M and Gorecki, S and Wu, J}, title = {An Agent-Based Modeling and Virtual Reality Application Using Distributed Simulation: Case of a COVID-19 Intensive Care Unit.}, journal = {IEEE transactions on engineering management}, volume = {70}, number = {8}, pages = {2931-2943}, pmid = {37954189}, issn = {0018-9391}, abstract = {Hospitals and other healthcare settings use various simulation methods to improve their operations, management, and training. The COVID-19 pandemic, with the resulting necessity for rapid and remote assessment, has highlighted the critical role of modeling and simulation in healthcare, particularly distributed simulation (DS). DS enables integration of heterogeneous simulations to further increase the usability and effectiveness of individual simulations. This article presents a DS system that integrates two different simulations developed for a hospital intensive care unit (ICU) ward dedicated to COVID-19 patients. AnyLogic has been used to develop a simulation model of the ICU ward using agent-based and discrete event modeling methods. This simulation depicts and measures physical contacts between healthcare providers and patients.
The Unity platform has been utilized to develop a virtual reality simulation of the ICU environment and operations. The high-level architecture, an IEEE standard for DS, has been used to build a cloud-based DS system by integrating and synchronizing the two simulation platforms. While enhancing the capabilities of both simulations, the DS system can be used for training purposes and assessment of different managerial and operational decisions to minimize contacts and disease transmission in the ICU ward by enabling data exchange between the two simulations.}, } @article {pmid38603290, year = {2022}, author = {Dadash Pour, P and Nazzal, MA and Darras, BM}, title = {The role of industry 4.0 technologies in overcoming pandemic challenges for the manufacturing sector.}, journal = {Concurrent engineering, research, and applications}, volume = {30}, number = {2}, pages = {190-205}, pmid = {38603290}, issn = {1063-293X}, abstract = {Industry 4.0 aims to revolutionize the manufacturing sector to achieve sustainable and efficient production. The novel coronavirus pandemic has brought many challenges in different industries globally. Shortage in supply of raw material, changes in product demand, and factories closures due to general lockdown are all examples of such challenges. The adaption of Industry 4.0 technologies can address these challenges and prevent their recurrence in case of another pandemic outbreak in future. A prominent advantage of Industry 4.0 technologies is their capability of building resilient and flexible systems that are responsive to exceptional circumstances such as unpredictable market demand, supply chain interruptions, and manpower shortage which can be crucial at times of pandemics. This work focuses on discussing how different Industry 4.0 technologies such as Cyber Physical Systems, Additive Manufacturing, and Internet of Things can help the manufacturing sector overcome pandemics challenges. The role of Industry 4.0 technologies in raw material provenance identification and counterfeit prevention, collaboration and business continuity, agility and decentralization of manufacturing, crisis simulation, elimination of single point of failure risk, and other factors is discussed. Moreover, a self-assessment readiness model has been developed to help manufacturing firms determine their readiness level for implementing different Industry 4.0 technologies.}, } @article {pmid39635183, year = {2022}, author = {Gui, Y and Nouri, BM and Miscuglio, M and Amin, R and Wang, H and Khurgin, JB and Dalir, H and Sorger, VJ}, title = {100 GHz micrometer-compact broadband monolithic ITO Mach-Zehnder interferometer modulator enabling 3500 times higher packing density.}, journal = {Nanophotonics (Berlin, Germany)}, volume = {11}, number = {17}, pages = {4001-4009}, pmid = {39635183}, issn = {2192-8614}, abstract = {Electro-optic modulators provide a key function in optical transceivers and increasingly in photonic programmable application-specific integrated circuits (ASICs) for machine learning and signal processing. However, both foundry-ready silicon-based modulators and conventional material-based devices utilizing lithium-niobate fall short in simultaneously providing high chip packaging density and fast speed. Current-driven ITO-based modulators have the potential to achieve both enabled by efficient light-matter interactions. Here, we introduce micrometer-compact Mach-Zehnder interferometer (MZI)-based modulators capable of exceeding 100 GHz switching rates. 
Integrating ITO thin films atop a photonic waveguide, one can achieve an efficient VπL = 0.1 V mm, spectrally broadband, and compact MZI phase shifter. Remarkably, this allows integrating more than 3500 of these modulators within the same chip area as a single silicon MZI modulator. The modulator design introduced here features a holistic photonic, electronic, and RF-based optimization and includes an asymmetric MZI tuning step to optimize the extinction ratio (ER)-to-insertion loss (IL) and a dielectric thickness sweep to balance the trade-offs between ER and speed. Driven by CMOS-compatible bias voltage levels, this device is the first to address next-generation modulator demands for processors of the machine intelligence revolution, in addition to the edge and cloud computing demands as well as optical transceivers alike.}, } @article {pmid37378273, year = {2023}, author = {Guo, G and Sun, Y and Qian, G and Wang, Q}, title = {LIC criterion for optimal subset selection in distributed interval estimation.}, journal = {Journal of applied statistics}, volume = {50}, number = {9}, pages = {1900-1920}, pmid = {37378273}, issn = {0266-4763}, abstract = {Distributed interval estimation in linear regression may be computationally infeasible in the presence of big data that are normally stored in different computer servers or in the cloud. An outstanding challenge is that the results from distributed estimation may still contain redundant information about the population characteristics of the data. To tackle this computing challenge, we develop an optimization procedure to select the best subset from the collection of data subsets, based on which we perform interval estimation in the context of linear regression. The procedure is derived by minimizing the length of the final interval estimator and maximizing the information retained in the selected data subset, and is thus named the LIC criterion. Theoretical performance of the LIC criterion is studied in this paper together with a simulation study and real data analysis.}, } @article {pmid38013884, year = {2022}, author = {Rogage, K and Mahamedi, E and Brilakis, I and Kassem, M}, title = {Beyond digital shadows: Digital Twin used for monitoring earthwork operation in large infrastructure projects.}, journal = {AI in civil engineering}, volume = {1}, number = {1}, pages = {7}, pmid = {38013884}, issn = {2730-5392}, abstract = {Current research on Digital Twin (DT) is largely focused on the performance of built assets in their operational phases as well as on the urban environment. However, the construction phase has received far less DT attention, and this paper therefore proposes a Digital Twin framework for the construction phase, develops a DT prototype and tests it for the use case of measuring the productivity and monitoring of earthwork operations. The DT framework and its prototype are underpinned by the principles of versatility, scalability, usability and automation to enable the DT to fulfil the requirements of large-sized earthwork projects and the dynamic nature of their operation. Cloud computing and dashboard visualisation were deployed to enable automated and repeatable data pipelines and data analytics at scale and to provide insights in near-real time.
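
The ITO modulator record above quotes an efficiency of VπL = 0.1 V mm; a quick arithmetic sketch of what that figure implies for phase-shifter length (the drive voltages are assumed for illustration, not taken from the paper):

```python
# Quick check of the quoted modulation efficiency V_pi*L = 0.1 V*mm:
# for a fixed V_pi*L product, halving the drive voltage doubles length.
v_pi_l = 0.1  # V*mm, figure quoted in the abstract above
for v_pi in (1.0, 2.5, 5.0):  # assumed CMOS-compatible drive voltages
    length_um = v_pi_l / v_pi * 1000  # device length in micrometers
    print(f"V_pi = {v_pi} V  ->  L = {length_um:.0f} um")
# e.g. a 5 V drive needs only a 20 um phase shifter, versus the
# millimeter-scale phase shifters typical of silicon MZI modulators,
# which is the basis of the claimed packing-density advantage.
```
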
The testing of the DT prototype in a motorway project in the Northeast of England successfully demonstrated its ability to produce key insights by using the following approaches: (i) To predict equipment utilisation ratios and productivities; (ii) To detect the percentage of time spent on different tasks (i.e., loading, hauling, dumping, returning or idling), the distance travelled by equipment over time and the speed distribution; and (iii) To visualise certain earthwork operations.}, } @article {pmid38620743, year = {2021}, author = {Poonia, A and Ghosh, S and Ghosh, A and Nath, SB and Ghosh, SK and Buyya, R}, title = {CONFRONT: Cloud-fog-dew based monitoring framework for COVID-19 management.}, journal = {Internet of things (Amsterdam, Netherlands)}, volume = {16}, number = {}, pages = {100459}, pmid = {38620743}, issn = {2542-6605}, abstract = {In the recent times, the IoT (Internet of Things) enabled devices and applications have seen a rapid growth in various sectors including healthcare. The ability of low-cost connected sensors to cover large areas makes it a potential tool in the fight against pandemics, like COVID-19. The COVID-19 has posed a formidable challenge for the developing countries, like India, which need to cater to large population base with limited health infrastructure. In this paper, we proposed a Cloud-fog-dew based mOnitoriNg Framework foR cOvid-19 maNagemenT, called CONFRONT. This cloud-fog-dew based healthcare model may help in preliminary diagnosis and also in monitoring patients while they are in quarantine facilities or home based treatments. The fog architecture ensures that the model is suited for real-time scenarios while keeping the bandwidth requirements low. To analyse large scale COVID-19 statistics data for extracting aggregate information of the disease spread, the cloud servers are leveraged due to its scalable computational and storage capabilities. The dew architecture ensures that the application is available at a limited scale even when cloud connectivity is lost, leading to a faster uptime for the application. A low cost wearable device consisting of heterogeneous sensors has also been designed and fabricated to realize the proposed framework.}, } @article {pmid37645133, year = {2021}, author = {Brandolini, F and Domingo-Ribas, G and Zerboni, A and Turner, S}, title = {A Google Earth Engine-enabled Python approach for the identification of anthropogenic palaeo-landscape features.}, journal = {Open research Europe}, volume = {1}, number = {}, pages = {22}, pmid = {37645133}, issn = {2732-5121}, abstract = {The necessity of sustainable development for landscapes has emerged as an important theme in recent decades. Current methods take a holistic approach to landscape heritage and promote an interdisciplinary dialogue to facilitate complementary landscape management strategies. With the socio-economic values of the "natural" and "cultural" landscape heritage increasingly recognised worldwide, remote sensing tools are being used more and more to facilitate the recording and management of landscape heritage. The advent of freeware cloud computing services has enabled significant improvements in landscape research allowing the rapid exploration and processing of satellite imagery such as the Landsat and Copernicus Sentinel datasets. This research represents one of the first applications of the Google Earth Engine (GEE) Python application programming interface (API) in studies of historic landscapes. 
The complete free and open-source software (FOSS) cloud protocol proposed here consists of a Python code script developed in Google Colab, which could be adapted and replicated in different areas of the world. A multi-temporal approach has been adopted to investigate the potential of Sentinel-2 satellite imagery to detect buried hydrological and anthropogenic features along with spectral index and spectral decomposition analysis. The protocol's effectiveness in identifying palaeo-riverscape features has been tested in the Po Plain (N Italy).}, } @article {pmid38907377, year = {2021}, author = {Tonti, S and Marzolini, B and Bulgheroni, M}, title = {Smartphone-Based Passive Sensing for Behavioral and Physical Monitoring in Free-Life Conditions: Technical Usability Study.}, journal = {JMIR biomedical engineering}, volume = {6}, number = {2}, pages = {e15417}, pmid = {38907377}, issn = {2561-3278}, abstract = {BACKGROUND: Smartphone use is widely spreading in society. Their embedded functions and sensors may play an important role in therapy monitoring and planning. However, the use of smartphones for intrapersonal behavioral and physical monitoring is not yet fully supported by adequate studies addressing technical reliability and acceptance.
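
The Brandolini et al. record above builds on the GEE Python API; a minimal sketch of that style of workflow, assuming an already authenticated Earth Engine account (the area, dates, and thresholds are placeholders, not the paper's values):

```python
# Minimal GEE-Python sketch: filter Sentinel-2 imagery over an area of
# interest and compute a spectral index. Placeholders throughout.
import ee

ee.Initialize()  # assumes prior `earthengine authenticate`

aoi = ee.Geometry.Rectangle([10.5, 44.8, 11.5, 45.2])  # illustrative box
s2 = (ee.ImageCollection("COPERNICUS/S2_SR")
      .filterBounds(aoi)
      .filterDate("2020-06-01", "2020-09-01")
      .filter(ee.Filter.lt("CLOUDY_PIXEL_PERCENTAGE", 10)))

# Median composite, then NDVI = (NIR - Red) / (NIR + Red) = (B8 - B4) / (B8 + B4)
ndvi = s2.median().normalizedDifference(["B8", "B4"]).rename("NDVI")
print(ndvi.reduceRegion(ee.Reducer.mean(), aoi, scale=10).getInfo())
```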

OBJECTIVE: The objective of this paper is to identify and discuss technical issues that may impact on the wide use of smartphones as clinical monitoring tools. The focus is on the quality of the data and transparency of the acquisition process.

METHODS: QuantifyMyPerson is a platform for continuous monitoring of smartphone use and embedded sensor data. The platform consists of an app for data acquisition, a backend cloud server for data storage and processing, and a web-based dashboard for data management and visualization. The data processing aims to extract meaningful features for the description of daily life such as phone status, calls, app use, GPS, and accelerometer data. Healthy subjects installed the app on their smartphones and ran it for 7 months. The acquired data were analyzed to assess the impact on smartphone performance (ie, battery consumption and anomalies in functioning) and data integrity. Relevance of the selected features in describing changes in daily life was assessed through the computation of a k-nearest neighbors global anomaly score to detect days that differ from others.
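
A k-nearest neighbors global anomaly score of the kind mentioned here can be computed by scoring each day by its mean distance to its k nearest other days in feature space. A minimal scikit-learn sketch, with invented daily features:

```python
# Sketch of a k-NN global anomaly score over daily feature vectors:
# a day stands out when its mean distance to its k nearest other days
# is large. The feature columns are invented for illustration.
import numpy as np
from sklearn.neighbors import NearestNeighbors
from sklearn.preprocessing import StandardScaler

# rows = days; columns = e.g. screen-on hours, #calls, km travelled, steps
days = np.array([
    [4.1, 12, 3.2, 7000], [3.9, 10, 2.9, 6500], [4.3, 14, 3.5, 7200],
    [4.0, 11, 3.0, 6900], [9.5,  1, 45.0,  900],   # an atypical day
])
X = StandardScaler().fit_transform(days)

k = 3
nn = NearestNeighbors(n_neighbors=k + 1).fit(X)  # +1 because each day
dist, _ = nn.kneighbors(X)                       # is its own neighbor
score = dist[:, 1:].mean(axis=1)                 # drop the self-distance
print(score.round(2))  # the last day receives the highest anomaly score
```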

RESULTS: The effectiveness of smartphone-based monitoring depends on the acceptability and interoperability of the system, as user retention and data integrity are key aspects. Acceptability was confirmed by the full transparency of the app and the absence of any conflicts with daily smartphone use. The only perceived issue was battery consumption, even though the trend of battery drain with and without the app running was comparable. Regarding interoperability, the app was successfully installed and run on several Android brands. The study shows that some smartphone manufacturers implement power-saving policies that prevent continuous sensor data acquisition and compromise data integrity. Data integrity was 96% on smartphones whose power-saving policies do not impact the embedded sensor management and 84% overall.

CONCLUSIONS: The main technological barriers to continuous behavioral and physical monitoring (ie, battery consumption and power-saving policies of manufacturers) may be overcome. Battery consumption increase is mainly due to GPS triangulation and may be limited, while data missing because of power-saving policies are related only to periods of nonuse of the phone since the embedded sensors are reactivated by any smartphone event. Overall, smartphone-based passive sensing is fully feasible and scalable despite the Android market fragmentation.}, } @article {pmid38620326, year = {2021}, author = {Gupta, D and Bhatt, S and Gupta, M and Tosun, AS}, title = {Future Smart Connected Communities to Fight COVID-19 Outbreak.}, journal = {Internet of things (Amsterdam, Netherlands)}, volume = {13}, number = {}, pages = {100342}, pmid = {38620326}, issn = {2542-6605}, abstract = {Internet of Things (IoT) has grown rapidly in the last decade and continues to develop in terms of dimension and complexity, offering a wide range of devices to support a diverse set of applications. With ubiquitous Internet, connected sensors and actuators, networking and communication technology along with artificial intelligence (AI), smart cyber-physical systems (CPS) provide services rendering assistance and convenience to humans in their daily lives. However, the recent outbreak of COVID-19 (also known as coronavirus) pandemic has exposed and highlighted the limitations of contemporary technological deployments especially to contain the widespread of this disease. IoT and smart connected technologies together with data-driven applications can play a crucial role not only in the prevention, mitigation, or continuous remote monitoring of patients, but also enable prompt enforcement of guidelines, rules, and administrative orders to contain such future outbreaks. In this paper, we envision an IoT and data-supported connected ecosystem designed for intelligent monitoring, pro-active prevention and control, and mitigation of COVID-19 and similar epidemics. We propose a gamut of synergistic applications and technology systems for various smart infrastructures including E-Health, smart home, supply chain management, transportation, and city, which will work in convergence to develop 'pandemic-proof' future smart communities. We also present a generalized cloud-enabled IoT implementation framework along with scientific solutions, which can be adapted and extended to deploy smart connected ecosystem scenarios using widely used Amazon Web Services (AWS) cloud infrastructures. In addition, we also implement an E-Health RPM use case scenario to demonstrate the need and practicality for smart connected communities. Finally, we highlight challenges and research directions that need thoughtful consideration and across the board cooperation among stakeholders to build resilient communities against future pandemics.}, } @article {pmid40213581, year = {2021}, author = {Hodkiewicz, M and Lukens, S and Brundage, MP and Sexton, T}, title = {Rethinking Maintenance Terminology for an Industry 4.0 Future.}, journal = {International journal of prognostics and health management}, volume = {12}, number = {1}, pages = {}, pmid = {40213581}, issn = {2153-2648}, support = {9999-NIST/ImNIST/Intramural NIST DOC/United States ; }, abstract = {Sensors and mathematical models have been used since the 1990's to assess the health of systems and diagnose anomalous behavior. 
The advent of the Internet of Things (IoT) increases the range of assets on which data can be collected cost-effectively. Cloud computing and the wider availability of data and models are democratizing the implementation of prognostics and health management (PHM) technologies. Together, these advancements and other Industry 4.0 developments are creating a paradigm shift in how maintenance work is planned and executed. In this new future, maintenance will be initiated once a potential failure has been detected (using PHM) and thus completed before a functional failure has occurred. Such work still counts as corrective, since corrective work is defined as "work done to restore the function of an asset after failure or when failure is imminent." Many metrics for measuring the effectiveness of maintenance work management are grounded in a negative perspective of corrective work and do not clearly capture work arising from condition monitoring and predictive modeling investments. In this paper, we use case studies to demonstrate the need to rethink maintenance terminology. The outcomes of this work include 1) definitions to be used for consistent evaluation of work management performance in an Industry 4.0 future and 2) recommendations to improve detection of work related to PHM activities.}, } @article {pmid38217155, year = {2021}, author = {Raucci, U and Valentini, A and Pieri, E and Weir, H and Seritan, S and Martínez, TJ}, title = {Voice-controlled quantum chemistry.}, journal = {Nature computational science}, volume = {1}, number = {1}, pages = {42-45}, pmid = {38217155}, issn = {2662-8457}, support = {N00014-18-1-2624//United States Department of Defense | United States Navy | Office of Naval Research (ONR)/ ; N00014-18-1-2659//United States Department of Defense | United States Navy | Office of Naval Research (ONR)/ ; N00014-18-1-2659//United States Department of Defense | United States Navy | Office of Naval Research (ONR)/ ; N00014-18-1-2659//United States Department of Defense | United States Navy | Office of Naval Research (ONR)/ ; N00014-16-1-2557//United States Department of Defense | United States Navy | Office of Naval Research (ONR)/ ; N00014-18-1-2659//United States Department of Defense | United States Navy | Office of Naval Research (ONR)/ ; N00014-18-1-2624//United States Department of Defense | United States Navy | Office of Naval Research (ONR)/ ; N00014-16-1-2557//United States Department of Defense | United States Navy | Office of Naval Research (ONR)/ ; }, abstract = {Over the past decade, artificial intelligence has been propelled forward by advances in machine learning algorithms and computational hardware, opening up myriad new avenues for scientific research. Nevertheless, virtual assistants and voice control have yet to be widely used in the natural sciences. Here, we present ChemVox, an interactive Amazon Alexa skill that uses speech recognition to perform quantum chemistry calculations. This new application interfaces Alexa with cloud computing and returns the results through a capable device.
ChemVox paves the way to making computational chemistry routinely accessible to the wider community.}, } @article {pmid38620477, year = {2020}, author = {Tuli, S and Tuli, S and Tuli, R and Gill, SS}, title = {Predicting the growth and trend of COVID-19 pandemic using machine learning and cloud computing.}, journal = {Internet of things (Amsterdam, Netherlands)}, volume = {11}, number = {}, pages = {100222}, pmid = {38620477}, issn = {2542-6605}, abstract = {The outbreak of COVID-19 Coronavirus, namely SARS-CoV-2, has created a calamitous situation throughout the world. The cumulative incidence of COVID-19 is rapidly increasing day by day. Machine Learning (ML) and Cloud Computing can be deployed very effectively to track the disease, predict growth of the epidemic and design strategies and policies to manage its spread. This study applies an improved mathematical model to analyse and predict the growth of the epidemic. An ML-based improved model has been applied to predict the potential threat of COVID-19 in countries worldwide. We show that using iterative weighting for fitting Generalized Inverse Weibull distribution, a better fit can be obtained to develop a prediction framework. This has been deployed on a cloud computing platform for more accurate and real-time prediction of the growth behavior of the epidemic. A data-driven approach with higher accuracy, as demonstrated here, can be very useful for a proactive response from the government and citizens. Finally, we propose a set of research opportunities and set up grounds for further practical applications.}, } @article {pmid38116301, year = {2020}, author = {He, X and Lin, X}, title = {Challenges and Opportunities in Statistics and Data Science: Ten Research Areas.}, journal = {Harvard data science review}, volume = {2}, number = {3}, pages = {}, pmid = {38116301}, issn = {2644-2353}, support = {R35 CA197449/CA/NCI NIH HHS/United States ; U01 HG009088/HG/NHGRI NIH HHS/United States ; U19 CA203654/CA/NCI NIH HHS/United States ; }, abstract = {As a discipline that deals with many aspects of data, statistics is a critical pillar in the rapidly evolving landscape of data science. The increasingly vital role of data, especially big data, in many applications, presents the field of statistics with unparalleled challenges and exciting opportunities. Statistics plays a pivotal role in data science by assisting with the use of data and decision making in the face of uncertainty. In this article, we present ten research areas that could make statistics and data science more impactful on science and society. Focusing on these areas will help better transform data into knowledge, actionable insights and deliverables, and promote more collaboration with computer and other quantitative scientists and domain scientists.}, } @article {pmid37981900, year = {2020}, author = {Jayathilaka, H and Krintz, C and Wolski, R}, title = {Detecting Performance Anomalies in Cloud Platform Applications.}, journal = {IEEE transactions on cloud computing}, volume = {8}, number = {3}, pages = {764-777}, pmid = {37981900}, issn = {2168-7161}, support = {R01 EB014877/EB/NIBIB NIH HHS/United States ; }, abstract = {We present Roots, a full-stack monitoring and analysis system for performance anomaly detection and bottleneck identification in cloud platform-as-a-service (PaaS) systems. Roots facilitates application performance monitoring as a core capability of PaaS clouds, and relieves the developers from having to instrument application code.
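
The Tuli et al. record above fits a Generalized Inverse Weibull distribution to epidemic growth data; a hedged scipy sketch of that kind of curve fit follows (the parameterization is assumed for illustration, and the paper's iterative weighting scheme is not reproduced):

```python
# Hedged sketch: fitting an inverse-Weibull-shaped growth curve to a
# cumulative case series with scipy. Parameterization is an assumption.
import numpy as np
from scipy.optimize import curve_fit

def giw_growth(t, m, alpha, beta, gamma):
    """Scaled generalized inverse Weibull CDF: m * exp(-gamma*(beta/t)**alpha)."""
    return m * np.exp(-gamma * (beta / t) ** alpha)

t = np.arange(1, 61, dtype=float)                 # days since outbreak
true = giw_growth(t, 100_000, 2.0, 25.0, 1.0)     # synthetic ground truth
cases = true * np.random.default_rng(1).normal(1.0, 0.03, t.size)

p0 = (cases[-1], 1.0, 10.0, 1.0)                  # rough initial guess
params, _ = curve_fit(giw_growth, t, cases, p0=p0,
                      bounds=(0, np.inf), maxfev=20_000)
print("fitted (M, alpha, beta, gamma):", np.round(params, 2))
```
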
Roots tracks HTTP/S requests to hosted cloud applications and their use of PaaS services. To do so it employs lightweight monitoring of PaaS service interfaces. Roots processes this data in the background using multiple statistical techniques that in combination detect performance anomalies (i.e. violations of service-level objectives). For each anomaly, Roots determines whether the event was caused by a change in the request workload or by a performance bottleneck in a PaaS service. By correlating data collected across different layers of the PaaS, Roots is able to trace high-level performance anomalies to bottlenecks in specific components in the cloud platform. We implement Roots using the AppScale PaaS and evaluate its overhead and accuracy.}, } @article {pmid38486787, year = {2020}, author = {Liang, F and Yu, W and Liu, X and Griffith, D and Golmie, N}, title = {Towards Edge-Based Deep Learning in Industrial Internet of Things.}, journal = {IEEE internet of things journal}, volume = {7}, number = {5}, pages = {}, pmid = {38486787}, issn = {2327-4662}, support = {9999-NIST/ImNIST/Intramural NIST DOC/United States ; }, abstract = {As a typical application of the Internet of Things (IoT), the Industrial Internet of Things (IIoT) connects all the related IoT sensing and actuating devices ubiquitously so that the monitoring and control of numerous industrial systems can be realized. Deep learning, as one viable way to carry out big data-driven modeling and analysis, could be integrated in IIoT systems to aid the automation and intelligence of IIoT systems. As deep learning requires large computation power, it is commonly deployed in cloud servers. Thus, the data collected by IoT devices must be transmitted to the cloud for training process, contributing to network congestion and affecting the IoT network performance as well as the supported applications. To address this issue, in this paper we leverage fog/edge computing paradigm and propose an edge computing-based deep learning model, which utilizes edge computing to migrate the deep learning process from cloud servers to edge nodes, reducing data transmission demands in the IIoT network and mitigating network congestion. Since edge nodes have limited computation ability compared to servers, we design a mechanism to optimize the deep learning model so that its requirements for computational power can be reduced. To evaluate our proposed solution, we design a testbed implemented in the Google cloud and deploy the proposed Convolutional Neural Network (CNN) model, utilizing a real-world IIoT dataset to evaluate our approach. 
Our experimental results confirm the effectiveness of our approach, which can not only reduce the network traffic overhead for IIoT, but also maintain the classification accuracy in comparison with several baseline schemes.}, } @article {pmid37309413, year = {2019}, author = {Liu, DM and Salganik, MJ}, title = {Successes and Struggles with Computational Reproducibility: Lessons from the Fragile Families Challenge.}, journal = {Socius : sociological research for a dynamic world}, volume = {5}, number = {}, pages = {}, pmid = {37309413}, issn = {2378-0231}, support = {R01 HD039135/HD/NICHD NIH HHS/United States ; R24 HD047879/HD/NICHD NIH HHS/United States ; P2C HD047879/HD/NICHD NIH HHS/United States ; R01 HD036916/HD/NICHD NIH HHS/United States ; R01 HD040421/HD/NICHD NIH HHS/United States ; }, abstract = {Reproducibility is fundamental to science, and an important component of reproducibility is computational reproducibility: the ability of a researcher to recreate the results of a published study using the original author's raw data and code. Although most people agree that computational reproducibility is important, it is still difficult to achieve in practice. In this article, the authors describe their approach to enabling computational reproducibility for the 12 articles in this special issue of Socius about the Fragile Families Challenge. The approach draws on two tools commonly used by professional software engineers but not widely used by academic researchers: software containers (e.g., Docker) and cloud computing (e.g., Amazon Web Services). These tools made it possible to standardize the computing environment around each submission, which will ease computational reproducibility both today and in the future. Drawing on their successes and struggles, the authors conclude with recommendations to researchers and journals.}, } @article {pmid38680952, year = {2016}, author = {Iorga, M and Scarfone, K}, title = {Using a Capability Oriented Methodology to Build Your Cloud Ecosystem.}, journal = {IEEE cloud computing}, volume = {3}, number = {2}, pages = {}, doi = {10.1109/mcc.2016.38}, pmid = {38680952}, issn = {2325-6095}, support = {9999-NIST/ImNIST/Intramural NIST DOC/United States ; }, abstract = {Organizations often struggle to capture the necessary functional capabilities for each cloud-based solution adopted for their information systems. Identifying, defining, selecting, and prioritizing these functional capabilities and the security components that implement and enforce them is surprisingly challenging. This article explains recent developments by the National Institute of Standards and Technology (NIST) in addressing these challenges. The article focuses on the capability oriented methodology for orchestrating a secure cloud ecosystem proposed as part of the NIST Cloud Computing Security Reference Architecture. The methodology recognizes that risk may vary for cloud Actors within a single ecosystem, so it takes a risk-based approach to functional capabilities. The result is an assessment of which cloud Actor is responsible for implementing each security component and how implementation should be prioritized. 
A cloud Actor, especially a cloud Consumer, that follows the methodology can more easily make well-informed decisions regarding their cloud ecosystems.}, } @article {pmid36944981, year = {2023}, author = {Varesio, C and De Giorgis, V and Veggiotti, P and Nardocci, N and Granata, T and Ragona, F and Pasca, L and Mensi, MM and Borgatti, R and Olivotto, S and Previtali, R and Riva, A and Mancardi, MM and Striano, P and Cavallin, M and Guerrini, R and Operto, FF and Pizzolato, A and Di Maulo, R and Martino, F and Lodi, A and Marini, C}, title = {GLUT1-DS Italian registry: past, present, and future: a useful tool for rare disorders.}, journal = {Orphanet journal of rare diseases}, volume = {18}, number = {1}, pages = {63}, pmid = {36944981}, issn = {1750-1172}, mesh = {Female ; Humans ; Male ; *Glucose Transporter Type 1/deficiency ; Italy ; Prospective Studies ; *Rare Diseases ; Registries ; Retrospective Studies ; Infant ; }, abstract = {BACKGROUND: GLUT1 deficiency syndrome is a rare, genetically determined neurological disorder for which Ketogenic Dietary Treatment represents the gold standard and lifelong treatment. Patient registries are powerful tools providing insights and real-world data on rare diseases.

OBJECTIVE: To describe the implementation of a national web-based registry for GLUT1-DS.

METHODS: This is a retrospective and prospective, multicenter, observational registry developed in collaboration with the Italian GLUT1-DS association and based on an innovative, flexible and configurable cloud computing technology platform, structured according to the most rigorous requirements for the management of patients' sensitive data. The GLUT1 Registry collects baseline and follow-up data on patients' demographics, history, symptoms, genotype, clinical, and instrumental evaluations and therapies.

RESULTS: Five Centers in Italy joined the registry, and two more Centers are currently joining. In the first two years of running, data from 67 patients (40 females and 27 males) have been collected. Age at symptom onset was within the first year of life in most (40, 60%) patients. The diagnosis was formulated in infancy in almost half of the cases (34, 51%). Symptoms at onset were mainly paroxysmal (mostly epileptic seizure and paroxysmal ocular movement disorder) or mixed paroxysmal and fixed symptoms (mostly psychomotor delay). Most patients (53, 79%) are currently under Ketogenic dietary treatments.

CONCLUSIONS: We describe the principles behind the design, development, and deployment of the web-based nationwide GLUT1-DS registry. It represents a stepping stone towards a more comprehensive understanding of the disease from onset to adulthood. It also represents a virtuous model from a technical, legal, and organizational point of view, thus representing a possible paradigmatic example for other rare disease registry implementation.}, } @article {pmid36937654, year = {2023}, author = {Selvarajan, S and Srivastava, G and Khadidos, AO and Khadidos, AO and Baza, M and Alshehri, A and Lin, JC}, title = {An artificial intelligence lightweight blockchain security model for security and privacy in IIoT systems.}, journal = {Journal of cloud computing (Heidelberg, Germany)}, volume = {12}, number = {1}, pages = {38}, pmid = {36937654}, issn = {2192-113X}, abstract = {The Industrial Internet of Things (IIoT) promises to deliver innovative business models across multiple domains by providing ubiquitous connectivity, intelligent data, predictive analytics, and decision-making systems for improved market performance. However, traditional IIoT architectures are highly susceptible to many security vulnerabilities and network intrusions, which bring challenges such as lack of privacy, integrity, trust, and centralization. This research aims to implement an Artificial Intelligence-based Lightweight Blockchain Security Model (AILBSM) to ensure privacy and security of IIoT systems. This novel model is meant to address issues that can occur with security and privacy when dealing with Cloud-based IIoT systems that handle data in the Cloud or on the Edge of Networks (on-device). The novel contribution of this paper is that it combines the advantages of both lightweight blockchain and Convivial Optimized Sprinter Neural Network (COSNN) based AI mechanisms with simplified and improved security operations. Here, the significant impact of attacks is reduced by transforming features into encoded data using an Authentic Intrinsic Analysis (AIA) model. Extensive experiments are conducted to validate this system using various attack datasets. In addition, the results of privacy protection and AI mechanisms are evaluated separately and compared using various indicators. By using the proposed AILBSM framework, the execution time is minimized to 0.6 seconds, the overall classification accuracy is improved to 99.8%, and detection performance is increased to 99.7%. Due to the inclusion of auto-encoder based transformation and blockchain authentication, the anomaly detection performance of the proposed model is highly improved, when compared to other techniques.}, } @article {pmid36937168, year = {2023}, author = {Sadasivan, H and Maric, M and Dawson, E and Iyer, V and Israeli, J and Narayanasamy, S}, title = {Accelerating Minimap2 for Accurate Long Read Alignment on GPUs.}, journal = {Journal of biotechnology and biomedicine}, volume = {6}, number = {1}, pages = {13-23}, pmid = {36937168}, issn = {2642-9128}, support = {R01 HL144125/HL/NHLBI NIH HHS/United States ; }, abstract = {Long read sequencing technology is becoming increasingly popular for Precision Medicine applications like Whole Genome Sequencing (WGS) and microbial abundance estimation. Minimap2 is the state-of-the-art aligner and mapper used by the leading long read sequencing technologies, today. However, Minimap2 on CPUs is very slow for long noisy reads. ~60-70% of the run-time on a CPU comes from the highly sequential chaining step in Minimap2. 
On the other hand, most Point-of-Care computational workflows in long read sequencing use Graphics Processing Units (GPUs). We present minimap2-accelerated (mm2-ax), a heterogeneous design for sequence mapping and alignment where minimap2's compute intensive chaining step is sped up on the GPU and demonstrate its time and cost benefits. We extract better intra-read parallelism from chaining without losing mapping accuracy by forward transforming Minimap2's chaining algorithm. Moreover, we better utilize the high memory available on modern cloud instances apart from better workload balancing, data locality and minimal branch divergence on the GPU. We show mm2-ax on an NVIDIA A100 GPU improves the chaining step with 5.41 - 2.57X speedup and 4.07 - 1.93X speedup : costup over the fastest version of Minimap2, mm2-fast, benchmarked on a Google Cloud Platform instance of 30 SIMD cores.}, } @article {pmid36936667, year = {2023}, author = {Namoun, A and Tufail, A and Nawas, W and BenRhouma, O and Alshanqiti, A}, title = {A Systematic Literature Review on Service Composition for People with Disabilities: Taxonomies, Solutions, and Open Research Challenges.}, journal = {Computational intelligence and neuroscience}, volume = {2023}, number = {}, pages = {5934548}, pmid = {36936667}, issn = {1687-5273}, mesh = {Humans ; *Persons with Disabilities ; *Self-Help Devices ; }, abstract = {Integrating smart heterogeneous objects, IoT devices, data sources, and software services to produce new business processes and functionalities continues to attract considerable attention from the research community due to its unraveled advantages, including reusability, adaptation, distribution, and pervasiveness. However, the exploitation of service-oriented computing technologies (e.g., SOC, SOA, and microservice architectures) by people with special needs is underexplored and often overlooked. Furthermore, the existing challenges in this area are yet to be identified clearly. This research study presents a rigorous literature survey of the recent advances in service-oriented composition approaches and solutions for disabled people, their domains of application, and the major challenges, covering studies published between January 2010 and October 2022. To this end, we applied the systematic literature review (SLR) methodology to retrieve and collate only the articles presenting and discussing service composition solutions tailored to produce digitally accessible services for consumption by people who suffer from an impairment or loss of some physical or mental functions. We searched six renowned bibliographic databases, particularly IEEE Xplore, Web of Science, Springer Link, ACM Library, ScienceDirect, and Google Scholar, to synthesize a final pool of 38 related articles. Our survey contributes a comprehensive taxonomy of service composition solutions, techniques, and practices that are utilized to create assistive technologies and services. The seven-facet taxonomy helps researchers and practitioners to quickly understand and analyze the fundamental conceptualizations and characteristics of accessible service composition for people with disabilities. Key findings showed that services are fused to assist disabled persons to carry out their daily activities, mainly in smart homes and ambient intelligent environments. Despite the emergence of immersive technologies (e.g., wearable computing), user-service interactions are enabled primarily through tactile and speech modalities. 
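
The minimap2 record above attributes most CPU runtime to the sequential chaining step; a toy sketch of that dynamic program (scoring is simplified, and this is not mm2-ax's forward-transformed version) shows why each anchor's score depends on its predecessors:

```python
# Toy sketch of the sequential chaining DP that dominates Minimap2
# runtime: each anchor's best chain score depends on earlier anchors,
# which is the dependency mm2-ax restructures to expose parallelism.

def chain(anchors, max_dist=5000):
    """anchors: list of (ref_pos, query_pos), sorted by ref_pos."""
    n = len(anchors)
    f = [1] * n                  # best chain score ending at anchor i
    for i in range(n):           # sequential outer loop ...
        ri, qi = anchors[i]
        for j in range(max(0, i - 64), i):   # ... over bounded predecessors
            rj, qj = anchors[j]
            if 0 < ri - rj <= max_dist and 0 < qi - qj <= max_dist:
                f[i] = max(f[i], f[j] + 1)   # extend chain j with anchor i
    return max(f)

print(chain([(100, 10), (180, 80), (260, 150), (9000, 200)]))  # -> 3
```
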
Service descriptions mainly incorporate functional features (e.g., performance, latency, and cost) of service quality, largely ignoring accessibility features. Moreover, the outstanding research problems revolve around (1) the unavailability of assistive services datasets, (2) the underspecification of accessibility aspects of disabilities, (3) the weak adoption of accessible and universal design practices, (4) the abstraction of service composition approaches, and (5) the rare experimental testing of composition approaches with disabled users. We conclude our survey with a set of guidelines to realize effective assistive service composition in IoT and cloud environments. Researchers and practitioners are advised to create assistive services that support the social relationships of disabled users and model their accessibility needs as part of the quality of service (QoS). Moreover, they should exploit AI/ML models to address the evolving requirements of disabled users in their unique environments. Furthermore, weaknesses of service composition solutions and research challenges are exposed as notable opportunities for future research.}, } @article {pmid36923109, year = {2023}, author = {Wu, H}, title = {Sharing and Cooperation of Improved Cross-Entropy Optimization Algorithm in Telemedicine Multimedia Information Processing.}, journal = {International journal of telemedicine and applications}, volume = {2023}, number = {}, pages = {7353489}, pmid = {36923109}, issn = {1687-6415}, abstract = {In order to improve the efficiency of medical multimedia information sharing, this paper combines cloud computing technology and SOA (service-oriented architecture) technology to build a medical multimedia information sharing system. Building a medical information sharing platform requires integrating information resources stored in information systems of medical institutions and nonmedical information systems related to medical information and forming a huge resource pool. It is important to mine and analyze the information resources in the resource pool to realize the sharing and interaction of medical information. To this end, this paper proposes a gain-adaptive control algorithm with online adjustable parameters and investigates the extension of the mutual entropy optimization algorithm in the control domain and its integrated processing capability in the process of medical multimedia information processing. In addition, this paper constructs a medical multimedia information sharing and collaboration platform with medical multimedia information sharing and telemedicine as the core and verifies the effectiveness of the platform through experiments. The simulation results and comparison results with other systems prove that the system in this paper can realize fast data processing, retrieve and analyze massive data, and meet the demand of remote intelligent diagnosis under the premise of safety and stability. 
Meanwhile, the system in this paper can help hospitals achieve fast and accurate diagnosis, which has strong theoretical and practical values.}, } @article {pmid36914133, year = {2023}, author = {Aman, MA and Chu, HJ}, title = {Long-term river extent dynamics and transition detection using remote sensing: Case studies of Mekong and Ganga River.}, journal = {The Science of the total environment}, volume = {876}, number = {}, pages = {162774}, doi = {10.1016/j.scitotenv.2023.162774}, pmid = {36914133}, issn = {1879-1026}, abstract = {River dynamics are currently comprehensively studied at either a bankline or reach-scale level. Monitoring large-scale and long-term river extent dynamics provides fundamental insights relevant to the impact of climatic factors and anthropogenic activities on fluvial geomorphology. This study analyzed the two most populous rivers, Ganga and Mekong, to understand the river extent dynamics using 32 years of Landsat satellite data (1990-2022) in a cloud computing platform. This study categorizes river dynamics and transitions using the combination of pixel-wise water frequency and temporal trends. This approach can demarcate the river channel stability, areas affected by erosion and sedimentation, and the seasonal transitions in the river. The results illustrate that the Ganga river channel is found to be relatively unstable and very prone to meandering and migration as almost 40 % of the river channel has been altered in the past 32 years. The seasonal transitions, such as lost seasonal and seasonal to permanent changes are more prominent in the Ganga river, and the dominance of meandering and sedimentation in the lower course is also illustrated. In contrast, the Mekong river has a more stable course with erosion and sedimentation observed at sparse locations in the lower course. However, the lost seasonal and seasonal to permanent changes are also dominant in the Mekong river. Since 1990, Ganga and Mekong rivers have lost approximately 13.3 % and 4.7 % of their seasonal water respectively, as compared to the other transitions and categories. Factors such as climate change, floods, and man-made reservoirs could all be critical in triggering these morphological changes.}, } @article {pmid36913423, year = {2023}, author = {Paulraj, D and Sethukarasi, T and Neelakandan, S and Prakash, M and Baburaj, E}, title = {An Efficient Hybrid Job Scheduling Optimization (EHJSO) approach to enhance resource search using Cuckoo and Grey Wolf Job Optimization for cloud environment.}, journal = {PloS one}, volume = {18}, number = {3}, pages = {e0282600}, pmid = {36913423}, issn = {1932-6203}, mesh = {*Software ; *Algorithms ; Cloud Computing ; Internet ; }, abstract = {Cloud computing has now evolved as an unavoidable technology in the fields of finance, education, internet business, and nearly all organisations. The cloud resources are practically accessible to cloud users over the internet to accomplish the desired task of the cloud users. The effectiveness and efficacy of cloud computing services depend on the tasks that the cloud users submit and the time taken to complete the task as well. By optimising resource allocation and utilisation, task scheduling is crucial to enhancing the effectiveness and performance of a cloud system. In this context, cloud computing offers a wide range of advantages, such as cost savings, security, flexibility, mobility, quality control, disaster recovery, automatic software upgrades, and sustainability. 
According to a recent research survey, more and more tech-savvy companies and industry executives recognize and utilize the advantages of cloud computing. Hence, as the number of cloud users increases, so does the need to regulate resource allocation. However, the scheduling of jobs in the cloud necessitates a smart and fast algorithm that can discover the resources that are accessible and schedule the jobs that are requested by different users. Consequently, for better resource allocation and job scheduling, a fast, efficient, tolerable job scheduling algorithm is required. Efficient Hybrid Job Scheduling Optimization (EHJSO) utilises Cuckoo Search Optimization and Grey Wolf Job Optimization (GWO). Due to some cuckoo species' obligate brood parasitism (laying eggs in other species' nests), the Cuckoo search optimization approach was developed. Grey wolf optimization (GWO) is a population-oriented AI system inspired by grey wolf social structure and hunting strategies. Makespan, computation time, fitness, iteration-based performance, and success rate were utilised to compare with previous studies. Experiments show that the recommended method is superior.}, } @article {pmid36910722, year = {2023}, author = {Yang, M and Ge, C and Zhao, X and Kou, H}, title = {FSPLO: a fast sensor placement location optimization method for cloud-aided inspection of smart buildings.}, journal = {Journal of cloud computing (Heidelberg, Germany)}, volume = {12}, number = {1}, pages = {31}, pmid = {36910722}, issn = {2192-113X}, abstract = {With the awakening of health awareness, people are raising a series of health-related requirements for the buildings they live in, with a view to improving their living conditions. In this context, BIM (Building Information Modeling) makes full use of cutting-edge theories and technologies in many domains such as health, environment, and information technology to provide a new way for engineers to design and build various healthy and green buildings. Specifically, sensors are playing an important role in achieving smart building goals by monitoring the surroundings of buildings, objects and people with the help of cloud computing technology. In addition, it is necessary to quickly determine the optimal sensor placement to save energy and minimize the number of sensors for a building, which is a nontrivial task for the cloud platform due to the limited number of sensors available and massive candidate locations for each sensor. In this paper, we propose a Fast Sensor Placement Location Optimization approach (FSPLO) to solve the BIM problem in cloud-aided smart buildings. In particular, we quickly filter out the repeated candidate locations of sensors in FSPLO using Locality Sensitive Hashing (LSH) techniques to maintain only a small number of optimized locations for deploying sensors around buildings. In this way, we can significantly reduce the number of sensors used for health and green buildings.
Finally, a set of simulation experiments demonstrates the excellent performance of our proposed FSPLO method.}, } @article {pmid36904959, year = {2023}, author = {Salat, L and Davis, M and Khan, N}, title = {DNS Tunnelling, Exfiltration and Detection over Cloud Environments.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {5}, pages = {}, pmid = {36904959}, issn = {1424-8220}, abstract = {The domain name system (DNS) protocol is fundamental to the operation of the internet, however, in recent years various methodologies have been developed that enable DNS attacks on organisations. In the last few years, the increased use of cloud services by organisations has created further security challenges as cyber criminals use numerous methodologies to exploit cloud services, configurations and the DNS protocol. In this paper, two different DNS tunnelling methods, Iodine and DNScat, have been conducted in the cloud environment (Google and AWS) and positive results of exfiltration have been achieved under different firewall configurations. Detection of malicious use of DNS protocol can be a challenge for organisations with limited cybersecurity support and expertise. In this study, various DNS tunnelling detection techniques were utilised in a cloud environment to create an effective monitoring system with a reliable detection rate, low implementation cost, and ease of use for organisations with limited detection capabilities. The Elastic stack (an open-source framework) was used to configure a DNS monitoring system and to analyse the collected DNS logs. Furthermore, payload and traffic analysis techniques were implemented to identify different tunnelling methods. This cloud-based monitoring system offers various detection techniques that can be used for monitoring DNS activities of any network especially accessible to small organisations. Moreover, the Elastic stack is open-source and it has no limitation with regards to the data that can be uploaded daily.}, } @article {pmid36904927, year = {2023}, author = {Saban, M and Bekkour, M and Amdaouch, I and El Gueri, J and Ait Ahmed, B and Chaari, MZ and Ruiz-Alzola, J and Rosado-Muñoz, A and Aghzout, O}, title = {A Smart Agricultural System Based on PLC and a Cloud Computing Web Application Using LoRa and LoRaWan.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {5}, pages = {}, pmid = {36904927}, issn = {1424-8220}, abstract = {The increasing challenges of agricultural processes and the growing demand for food globally are driving the industrial agriculture sector to adopt the concept of 'smart farming'. Smart farming systems, with their real-time management and high level of automation, can greatly improve productivity, food safety, and efficiency in the agri-food supply chain. This paper presents a customized smart farming system that uses a low-cost, low-power, and wide-range wireless sensor network based on Internet of Things (IoT) and Long Range (LoRa) technologies. In this system, LoRa connectivity is integrated with existing Programmable Logic Controllers (PLCs), which are commonly used in industry and farming to control multiple processes, devices, and machinery through the Simatic IOT2040. The system also includes a newly developed web-based monitoring application hosted on a cloud server, which processes data collected from the farm environment and allows for remote visualization and control of all connected devices. 
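
The DNS tunnelling record above mentions payload analysis; one common heuristic of that kind flags long, high-entropy query labels, since tunnelled data tends to look random. A minimal sketch (the thresholds are illustrative assumptions, not values from the paper):

```python
# One common payload-analysis heuristic for DNS tunnelling detection:
# tunnelled queries tend to carry long, high-entropy subdomain labels.
# Thresholds below are illustrative assumptions.
import math
from collections import Counter

def shannon_entropy(s: str) -> float:
    counts = Counter(s)
    return -sum(c / len(s) * math.log2(c / len(s)) for c in counts.values())

def looks_like_tunnel(qname: str, entropy_thresh=3.5, len_thresh=40) -> bool:
    sub = qname.rsplit(".", 3)[0]   # crude split-off of the domain tail
    return len(sub) > len_thresh and shannon_entropy(sub) > entropy_thresh

print(looks_like_tunnel("www.example.com"))                            # False
print(looks_like_tunnel(
    "nv2kq3j9x0aa8d7f4hq1zz0p5c6m2r8b7w9y1t3u5i0o2.t.evil.example"))   # True
```
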
A Telegram bot is included for automated communication with users through this mobile messaging app. The proposed network structure has been tested, and the path loss in the wireless LoRa is evaluated.}, } @article {pmid36904909, year = {2023}, author = {Lin, HY and Tsai, TT and Ting, PY and Fan, YR}, title = {Identity-Based Proxy Re-Encryption Scheme Using Fog Computing and Anonymous Key Generation.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {5}, pages = {}, pmid = {36904909}, issn = {1424-8220}, support = {MOST 110-2221-E-019-041-MY3//Ministry of Science and Technology of Republic of China/ ; }, abstract = {In the fog computing architecture, a fog is a node closer to clients and responsible for responding to users' requests as well as forwarding messages to clouds. In some medical applications such as the remote healthcare, a sensor of patients will first send encrypted data of sensed information to a nearby fog such that the fog acting as a re-encryption proxy could generate a re-encrypted ciphertext designated for requested data users in the cloud. Specifically, a data user can request access to cloud ciphertexts by sending a query to the fog node that will forward this query to the corresponding data owner who preserves the right to grant or deny the permission to access his/her data. When the access request is granted, the fog node will obtain a unique re-encryption key for carrying out the re-encryption process. Although some previous concepts have been proposed to fulfill these application requirements, they either have known security flaws or incur higher computational complexity. In this work, we present an identity-based proxy re-encryption scheme on the basis of the fog computing architecture. Our identity-based mechanism uses public channels for key distribution and avoids the troublesome problem of key escrow. We also formally prove that the proposed protocol is secure in the IND-PrID-CPA notion. Furthermore, we show that our work exhibits better performance in terms of computational complexity.}, } @article {pmid36904869, year = {2023}, author = {Lin, HY}, title = {Secure Data Transfer Based on a Multi-Level Blockchain for Internet of Vehicles.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {5}, pages = {}, pmid = {36904869}, issn = {1424-8220}, abstract = {Because of the decentralized trait of the blockchain and the Internet of vehicles, both are very suitable for the architecture of the other. This study proposes a multi-level blockchain framework to secure information security on the Internet of vehicles. The main motivation of this study is to propose a new transaction block and ensure the identity of traders and the non-repudiation of transactions through the elliptic curve digital signature algorithm ECDSA. The designed multi-level blockchain architecture distributes the operations within the intra_cluster blockchain and the inter_cluster blockchain to improve the efficiency of the entire block. On the cloud computing platform, we exploit the threshold key management protocol, and the system can recover the system key as long as the threshold partial key is collected. This avoids the occurrence of PKI single-point failure. Thus, the proposed architecture ensures the security of OBU-RSU-BS-VM. The proposed multi-level blockchain framework consists of a block, intra-cluster blockchain and inter-cluster blockchain. 
The roadside unit (RSU) is responsible for the communication of vehicles in its vicinity, similar to a cluster head on the Internet of Vehicles. This study exploits the RSU to manage the block, while the base station is responsible for managing the intra-cluster blockchain, named intra_clusterBC, and the back-end cloud server is responsible for the entire system blockchain, named inter_clusterBC. Finally, RSUs, base stations and cloud servers cooperatively construct the multi-level blockchain framework and improve the security and efficiency of the operation of the blockchain. Overall, to protect the security of the transaction data of the blockchain, we propose a new transaction block structure and adopt the elliptic curve digital signature algorithm (ECDSA) to ensure that the Merkle tree root value is not changed, and also to ensure the identity of traders and the non-repudiation of transaction data. Finally, this study considers information security in a cloud environment, and we therefore propose a secret-sharing and secure-map-reducing architecture based on the identity confirmation scheme. The proposed decentralized scheme is very suitable for distributed connected vehicles and can also improve the execution efficiency of the blockchain.}, } @article {pmid36904852, year = {2023}, author = {Vitali, G and Arru, M and Magnanini, E}, title = {A Scalable Device for Undisturbed Measurement of Water and CO2 Fluxes through Natural Surfaces.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {5}, pages = {}, pmid = {36904852}, issn = {1424-8220}, support = {101000256//H2020 European Research Council/ ; }, abstract = {In a climate change scenario and under a growing interest in Precision Agriculture, it is more and more important to map and record seasonal trends of the respiration of cropland and natural surfaces. Ground-level sensors to be placed in the field or integrated into autonomous vehicles are of growing interest. In this scope, a low-power, IoT-compliant device for the measurement of multiple surface CO2 and water vapour (WV) concentrations has been designed and developed. The device is described and tested under controlled and field conditions, showing ready and easy access to collected values, typical of a cloud-computing-based approach. The device proved to be usable in indoor and open-air environments for a long time, and the sensors were arranged in multiple configurations to evaluate simultaneous concentrations and flows, while the low-cost, low-power (LP IoT-compliant) design is achieved by a purpose-specific printed circuit board and firmware fitting the characteristics of the controller.}, } @article {pmid36904779, year = {2023}, author = {Kwon, Y and Kim, W and Jung, I}, title = {Neural Network Models for Driving Control of Indoor Autonomous Vehicles in Mobile Edge Computing.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {5}, pages = {}, pmid = {36904779}, issn = {1424-8220}, abstract = {Mobile edge computing has been proposed as a solution for solving the latency problem of traditional cloud computing. In particular, mobile edge computing is needed in areas such as autonomous driving, which requires large amounts of data to be processed without latency for safety. Indoor autonomous driving is attracting attention as one of the mobile edge computing services. Furthermore, an indoor autonomous vehicle relies on its sensors for location recognition because, unlike outdoor driving, it cannot use a GPS device.
However, while the autonomous vehicle is being driven, real-time processing of external events and correction of errors are required for safety. Furthermore, an efficient autonomous driving system is required because the vehicle is a mobile environment with resource constraints. This study proposes neural network models as a machine-learning method for autonomous driving in an indoor environment. The neural network model predicts the most appropriate driving command for the current location based on the range data measured with the LiDAR sensor. We designed six neural network models to be evaluated according to the number of input data points. In addition, we built a Raspberry Pi-based autonomous vehicle for driving and learning, and an indoor circular driving track for data collection and performance evaluation. Finally, we evaluated the six neural network models in terms of confusion matrix, response time, battery consumption, and driving command accuracy. In addition, when neural network learning was applied, we confirmed the effect of the number of inputs on resource usage. The results will inform the choice of an appropriate neural network model for an indoor autonomous vehicle.}, } @article {pmid36904650, year = {2023}, author = {Kumar, MS and Karri, GR}, title = {EEOA: Cost and Energy Efficient Task Scheduling in a Cloud-Fog Framework.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {5}, pages = {}, pmid = {36904650}, issn = {1424-8220}, abstract = {Cloud-fog computing is a wide range of service environments created to provide quick, flexible services to customers, and the phenomenal growth of the Internet of Things (IoT) has produced an immense amount of data on a daily basis. To complete tasks and meet service-level agreement (SLA) commitments, the provider assigns appropriate resources and employs scheduling techniques to efficiently manage the execution of received IoT tasks in fog or cloud systems. The effectiveness of cloud services is directly impacted by some other important criteria, such as energy usage and cost, which are not taken into account by many of the existing methodologies. To resolve the aforementioned problems, an effective scheduling algorithm is required to schedule the heterogeneous workload and enhance the quality of service (QoS). Therefore, a nature-inspired multi-objective task scheduling algorithm called the electric earthworm optimization algorithm (EEOA) is proposed in this paper for IoT requests in a cloud-fog framework. This method was created by combining the earthworm optimization algorithm (EOA) and the electric fish optimization algorithm (EFO) to improve EFO's exploitation capability while searching for the best solution to the problem at hand. Concerning execution time, cost, makespan, and energy consumption, the suggested scheduling technique's performance was assessed using significant instances of real-world workloads such as CEA-CURIE and HPC2N. Based on simulation results, our proposed approach improves efficiency by 89%, reduces energy consumption by 94%, and lowers total cost by 87% compared with existing algorithms for the scenarios considered using different benchmarks.
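To make the optimization target concrete, here is a toy Python sketch of the kind of weighted multi-objective cost (makespan, energy, cost) that cloud-fog schedulers such as EEOA minimize; task lengths, VM parameters, and weights are invented for illustration, and the EOA/EFO search operators themselves are omitted.

```python
# A toy multi-objective fitness for a task-to-VM assignment; all values
# are illustrative, not the paper's benchmark settings.
import numpy as np

rng = np.random.default_rng(0)
task_len = rng.uniform(1e3, 5e3, size=20)      # million instructions per task
vm_speed = np.array([500.0, 1000.0, 2000.0])   # MIPS per VM
vm_cost = np.array([0.02, 0.05, 0.12])         # $ per busy second
vm_power = np.array([40.0, 70.0, 120.0])       # watts while busy

def fitness(assign: np.ndarray) -> float:
    """Weighted sum of makespan, energy, and cost for one assignment."""
    busy = np.zeros(len(vm_speed))
    for t, v in zip(task_len, assign):
        busy[v] += t / vm_speed[v]             # seconds of work queued on VM v
    makespan = busy.max()
    energy = float(busy @ vm_power)            # joules
    cost = float(busy @ vm_cost)               # dollars
    return 0.5 * makespan + 0.3 * energy / 1e3 + 0.2 * cost

random_assign = rng.integers(0, len(vm_speed), size=task_len.size)
print(f"fitness of a random schedule: {fitness(random_assign):.2f}")
```

A metaheuristic such as EEOA searches over assignments like `random_assign` to minimize this kind of score.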
Detailed simulations demonstrate that the suggested approach provides a superior scheduling scheme with better results than the existing scheduling techniques.}, } @article {pmid36904580, year = {2023}, author = {Kalinagac, O and Gür, G and Alagöz, F}, title = {Prioritization Based Task Offloading in UAV-Assisted Edge Networks.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {5}, pages = {}, pmid = {36904580}, issn = {1424-8220}, abstract = {Under demanding operational conditions such as traffic surges, coverage issues, and low latency requirements, terrestrial networks may become inadequate to provide the expected service levels to users and applications. Moreover, when natural disasters or physical calamities occur, the existing network infrastructure may collapse, leading to formidable challenges for emergency communications in the area served. In order to provide wireless connectivity as well as facilitate a capacity boost under transient high service load situations, a substitute or auxiliary fast-deployable network is needed. Unmanned Aerial Vehicle (UAV) networks are well suited for such needs thanks to their high mobility and flexibility. In this work, we consider an edge network consisting of UAVs equipped with wireless access points. These software-defined network nodes serve a latency-sensitive workload of mobile users in an edge-to-cloud continuum setting. We investigate prioritization-based task offloading to support prioritized services in this on-demand aerial network. To this end, we construct an offloading management optimization model to minimize the overall penalty due to priority-weighted delay against task deadlines. Since the defined assignment problem is NP-hard, we also propose three heuristic algorithms as well as a branch-and-bound-style quasi-optimal task offloading algorithm, and we investigate how the system performs under different operating conditions by conducting simulation-based experiments. Moreover, we made an open-source contribution to Mininet-WiFi to enable independent Wi-Fi media, which are required for simultaneous packet transfers over different Wi-Fi media.}, } @article {pmid36900055, year = {2023}, author = {Barany, L and Hore, N and Stadlbauer, A and Buchfelder, M and Brandner, S}, title = {Prediction of the Topography of the Corticospinal Tract on T1-Weighted MR Images Using Deep-Learning-Based Segmentation.}, journal = {Diagnostics (Basel, Switzerland)}, volume = {13}, number = {5}, pages = {}, pmid = {36900055}, issn = {2075-4418}, abstract = {INTRODUCTION: Tractography is an invaluable tool in the planning of tumor surgery in the vicinity of functionally eloquent areas of the brain, as well as in the research of normal development and of various diseases. The aim of our study was to compare the performance of a deep-learning-based image segmentation for the prediction of the topography of white matter tracts on T1-weighted MR images with the performance of a manual segmentation.

METHODS: T1-weighted MR images of 190 healthy subjects from 6 different datasets were utilized in this study. Using deterministic diffusion tensor imaging, we first reconstructed the corticospinal tract on both sides. After training a segmentation model on 90 subjects of the PIOP2 dataset using the nnU-Net in a cloud-based environment with a graphics processing unit (Google Colab), we evaluated its performance using 100 subjects from 6 different datasets.

RESULTS: Our algorithm created a segmentation model that predicted the topography of the corticospinal pathway on T1-weighted images in healthy subjects. The average Dice score was 0.5479 (0.3513-0.7184) on the validation dataset.
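The Dice score quoted above is defined as 2|A ∩ B| / (|A| + |B|) for predicted and reference masks A and B. A minimal NumPy sketch, with illustrative mask shapes:

```python
# Dice similarity coefficient for two binary segmentation masks.
import numpy as np

def dice_score(pred: np.ndarray, truth: np.ndarray) -> float:
    pred, truth = pred.astype(bool), truth.astype(bool)
    intersection = np.logical_and(pred, truth).sum()
    denom = pred.sum() + truth.sum()
    return 2.0 * intersection / denom if denom else 1.0  # both empty: perfect

# Illustrative overlapping cubes inside a 64^3 volume.
pred = np.zeros((64, 64, 64), dtype=bool);  pred[20:40, 20:40, 20:40] = True
truth = np.zeros((64, 64, 64), dtype=bool); truth[25:45, 22:42, 20:40] = True
print(f"Dice = {dice_score(pred, truth):.4f}")
```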

CONCLUSIONS: Deep-learning-based segmentation could be applicable in the future to predict the location of white matter pathways in T1-weighted scans.}, } @article {pmid36899558, year = {2023}, author = {Zhang, H and Wang, P and Zhang, S and Wu, Z}, title = {An adaptive offloading framework for license plate detection in collaborative edge and cloud computing.}, journal = {Mathematical biosciences and engineering : MBE}, volume = {20}, number = {2}, pages = {2793-2814}, doi = {10.3934/mbe.2023131}, pmid = {36899558}, issn = {1551-0018}, abstract = {With the explosive growth of edge computing, huge amounts of data are being generated in billions of edge devices. It is difficult to balance detection efficiency and detection accuracy at the same time for object detection on multiple edge devices. However, few studies have investigated how to improve the collaboration between cloud computing and edge computing under realistic challenges, such as limited computation capacities, network congestion and long latency. To tackle these challenges, we propose a new multi-model license plate detection hybrid methodology with a tradeoff between efficiency and accuracy to process the tasks of license plate detection at the edge nodes and the cloud server. We also design a new probability-based offloading initialization algorithm that not only obtains reasonable initial solutions but also facilitates the accuracy of license plate detection. In addition, we introduce an adaptive offloading framework based on a gravitational genetic searching algorithm (GGSA), which can comprehensively consider influential factors such as license plate detection time, queuing time, energy consumption, image quality, and accuracy. GGSA is helpful for Quality-of-Service (QoS) enhancement. Extensive experiments show that our proposed GGSA offloading framework exhibits good performance in collaborative edge and cloud computing of license plate detection compared with other methods. The results demonstrate that, compared with the traditional approach in which all tasks are executed on the cloud server (AC), GGSA improves the offloading effect by 50.31%. In addition, the offloading framework has strong portability when making real-time offloading decisions.}, } @article {pmid36878917, year = {2023}, author = {Grossman, RL}, title = {Ten lessons for data sharing with a data commons.}, journal = {Scientific data}, volume = {10}, number = {1}, pages = {120}, pmid = {36878917}, issn = {2052-4463}, support = {U2CHL138346,//U.S. Department of Health & Human Services | NIH | National Heart, Lung, and Blood Institute (NHLBI)/ ; }, abstract = {A data commons is a cloud-based data platform with a governance structure that allows a community to manage, analyze and share its data. Data commons provide a research community with the ability to manage and analyze large datasets using the elastic scalability provided by cloud computing and to share data securely and compliantly, and, in this way, accelerate the pace of research.
Over the past decade, a number of data commons have been developed, and we discuss some of the lessons learned from this effort.}, } @article {pmid36867158, year = {2024}, author = {Kumar, D and Mandal, N and Kumar, Y}, title = {Cloud-Based Advanced Shuffled Frog Leaping Algorithm for Tasks Scheduling.}, journal = {Big data}, volume = {12}, number = {2}, pages = {110-126}, doi = {10.1089/big.2022.0095}, pmid = {36867158}, issn = {2167-647X}, mesh = {*Cloud Computing ; *Algorithms ; Learning ; }, abstract = {In recent years, the world has seen incremental growth in online activities, owing to which the volume of data in cloud servers has also been increasing exponentially. With rapidly increasing data, the load on cloud servers has increased in the cloud computing environment. With rapidly evolving technology, various cloud-based systems were developed to enhance the user experience, but the increased online activities around the globe have also increased the data load on these systems. To maintain the efficiency and performance of the applications hosted in cloud servers, task scheduling has become very important. The task scheduling process helps in reducing the makespan time and average cost by scheduling the tasks to virtual machines (VMs). The task scheduling depends on assigning tasks to VMs to process the incoming tasks. The task scheduling should follow some algorithm for assigning tasks to VMs. Many researchers have proposed different scheduling algorithms for task scheduling in the cloud computing environment. In this article, an advanced form of the shuffled frog optimization algorithm, which works on the nature and behavior of frogs searching for food, has been proposed. The authors have introduced a new algorithm to shuffle the position of frogs in the memeplex to obtain the best result. Using this optimization technique, the cost function of the central processing unit, makespan, and fitness function were calculated. The fitness function is the sum of the budget cost function and the makespan time. The proposed method helps in reducing the makespan time as well as the average cost by scheduling the tasks to VMs effectively. Finally, the performance of the proposed advanced shuffled frog optimization method is compared with existing task scheduling methods such as the whale optimization-based scheduler (W-Scheduler), sliced particle swarm optimization (SPSO-SA), the inverted ant colony optimization algorithm, and static learning particle swarm optimization (SLPSO-SA) in terms of average cost and makespan. Experimentally, it was concluded that the proposed advanced frog optimization algorithm can schedule tasks to the VMs more effectively as compared with other scheduling methods, with a makespan of 6, an average cost of 4, and a fitness of 10.}, } @article {pmid36860419, year = {2023}, author = {Singh, J and Chen, J and Singh, SP and Singh, MP and Hassan, MM and Hassan, MM and Awal, H}, title = {Load-Balancing Strategy: Employing a Capsule Algorithm for Cutting Down Energy Consumption in Cloud Data Centers for Next Generation Wireless Systems.}, journal = {Computational intelligence and neuroscience}, volume = {2023}, number = {}, pages = {6090282}, pmid = {36860419}, issn = {1687-5273}, mesh = {*Algorithms ; *Cloud Computing ; Data Accuracy ; Electric Power Supplies ; Happiness ; }, abstract = {Per-user pricing is possible with cloud computing, a relatively new technology.
It provides remote testing and commissioning services through the web, and it utilizes virtualization to make computing resources available. In order to host and store firm data, cloud computing relies on data centers. Data centers are made up of networked computers, cables, power supplies, and other components. Cloud data centers have always had to prioritise high performance over energy efficiency. The biggest obstacle is finding a happy medium between system performance and energy consumption, namely, lowering energy use without compromising system performance or service quality. These results were obtained using the PlanetLab dataset. In order to implement the strategy we recommend, it is crucial to get a complete picture of how energy is being consumed in the cloud. Using proper optimization criteria and guided by energy consumption models, this article offers the Capsule Significance Level of Energy Consumption (CSLEC) pattern, which demonstrates how to conserve more energy in cloud data centers. The capsule optimization prediction phase achieves an F1-score of 96.7 percent and a data accuracy of 97 percent, allowing for more precise projections of future values.}, } @article {pmid36855338, year = {2023}, author = {Calcaterra, D and Tomarchio, O}, title = {Policy-Based Holistic Application Management with BPMN and TOSCA.}, journal = {SN computer science}, volume = {4}, number = {3}, pages = {232}, pmid = {36855338}, issn = {2661-8907}, abstract = {With the wide adoption of cloud computing across technology industries and research institutions, an ever-growing interest in cloud orchestration frameworks has emerged over the past few years. These orchestration frameworks enable the automated provisioning and decommissioning of cloud applications in a timely and efficient manner, but they offer limited or no support for application management. While management functionalities, such as configuring, monitoring and scaling single components, can be directly covered by cloud providers and configuration management tools, holistic management features, such as backing up, testing and updating multiple components, cannot be automated using these approaches. In this paper, we propose a concept to automatically generate executable holistic management workflows based on the TOSCA standard. The practical feasibility of the approach is validated through a prototype implementation and a case study.}, } @article {pmid36852030, year = {2023}, author = {Manconi, A and Gnocchi, M and Milanesi, L and Marullo, O and Armano, G}, title = {Framing Apache Spark in life sciences.}, journal = {Heliyon}, volume = {9}, number = {2}, pages = {e13368}, pmid = {36852030}, issn = {2405-8440}, abstract = {Advances in high-throughput and digital technologies have required the adoption of big data for handling complex tasks in life sciences. However, the shift to big data has confronted researchers with technical and infrastructural challenges in storing, sharing, and analysing it. In fact, this kind of task requires distributed computing systems and algorithms able to ensure efficient processing. Cutting-edge distributed programming frameworks make it possible to implement flexible algorithms that adapt the computation to the data on on-premise HPC clusters or cloud architectures. In this context, Apache Spark is a very powerful HPC engine for large-scale data processing on clusters. Thanks also to specialised libraries for working with structured and relational data, it supports machine learning, graph-based computation, and stream processing.
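For readers new to the framework, a minimal PySpark sketch of the structured-data style of processing discussed here (column names and values are illustrative):

```python
# Load records into a Spark DataFrame and aggregate them; Spark
# distributes the work across executors on a cluster (or runs locally).
from pyspark.sql import SparkSession

spark = (SparkSession.builder
         .appName("life-sciences-demo")
         .getOrCreate())

samples = [("geneA", 12.1), ("geneB", 3.4), ("geneA", 9.8), ("geneC", 7.7)]
df = spark.createDataFrame(samples, schema=["gene", "expression"])

df.groupBy("gene").avg("expression").show()  # mean expression per gene
spark.stop()
```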
This review article is aimed at helping life sciences researchers to ascertain the features of Apache Spark and to assess whether it can be successfully used in their research activities.}, } @article {pmid36850940, year = {2023}, author = {Antonini, M and Pincheira, M and Vecchio, M and Antonelli, F}, title = {An Adaptable and Unsupervised TinyML Anomaly Detection System for Extreme Industrial Environments.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {4}, pages = {}, pmid = {36850940}, issn = {1424-8220}, abstract = {Industrial assets often feature multiple sensing devices to keep track of their status by monitoring certain physical parameters. These readings can be analyzed with machine learning (ML) tools to identify potential failures through anomaly detection, allowing operators to take appropriate corrective actions. Typically, these analyses are conducted on servers located in data centers or the cloud. However, this approach increases system complexity and is susceptible to failure in cases where connectivity is unavailable. Furthermore, this communication restriction limits the approach's applicability in extreme industrial environments where operating conditions affect communication and access to the system. This paper proposes and evaluates an end-to-end adaptable and configurable anomaly detection system that uses the Internet of Things (IoT), edge computing, and Tiny-MLOps methodologies in an extreme industrial environment such as submersible pumps. The system runs on an IoT sensing kit, based on an ESP32 microcontroller and MicroPython firmware, located near the data source. The processing pipeline on the sensing device collects data, trains an anomaly detection model, and alerts an external gateway in the event of an anomaly. The anomaly detection model uses the isolation forest algorithm, which can be trained on the microcontroller in just 1.2 to 6.4 s and detect an anomaly in less than 16 milliseconds with an ensemble of 50 trees and 80 KB of RAM. Additionally, the system employs blockchain technology to provide a transparent and irrefutable repository of anomalies.}, } @article {pmid36850847, year = {2023}, author = {Luo, G and He, B and Xiong, Y and Wang, L and Wang, H and Zhu, Z and Shi, X}, title = {An Optimized Convolutional Neural Network for the 3D Point-Cloud Compression.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {4}, pages = {}, pmid = {36850847}, issn = {1424-8220}, support = {20224BAB202016//Natural Science Foundation of Jiangxi Province/ ; 20224BAB212014//Natural Science Foundation of Jiangxi Province/ ; }, abstract = {Due to the tremendous volume of 3D point-cloud models, achieving a balance between a high compression ratio, a low distortion rate, and computing cost in point-cloud compression is a significant issue in the field of virtual reality (VR). Convolutional neural networks have been used in numerous point-cloud compression research approaches during the past few years in an effort to advance the state of the art. In this work, we have evaluated the effects of different network parameters, including neural network depth, stride, and activation function, on point-cloud compression, resulting in an optimized convolutional neural network for compression. We first analyzed earlier research on point-cloud compression based on convolutional neural networks before designing our own convolutional neural network.
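As a rough illustration of this design space, the following PyTorch sketch shows a voxel-grid autoencoder with the 4-layer, stride-2, Sigmoid configuration reported below as the best performer; the channel widths and the 64^3 input grid are assumptions for illustration, not the authors' exact network.

```python
# A minimal voxel-grid convolutional autoencoder sketch for point-cloud
# compression experiments; 4 stride-2 encoder layers, Sigmoid output.
import torch
import torch.nn as nn

def down(cin, cout):   # stride-2 conv halves each spatial dimension
    return nn.Sequential(nn.Conv3d(cin, cout, 3, stride=2, padding=1),
                         nn.ReLU())

def up(cin, cout):     # transposed conv doubles each spatial dimension
    return nn.Sequential(
        nn.ConvTranspose3d(cin, cout, 3, stride=2, padding=1, output_padding=1),
        nn.ReLU())

class VoxelAE(nn.Module):
    def __init__(self):
        super().__init__()
        self.enc = nn.Sequential(down(1, 16), down(16, 32),
                                 down(32, 64), down(64, 128))  # 4 layers
        self.dec = nn.Sequential(up(128, 64), up(64, 32), up(32, 16),
                                 nn.ConvTranspose3d(16, 1, 3, stride=2,
                                                    padding=1, output_padding=1),
                                 nn.Sigmoid())  # voxel occupancy in [0, 1]

    def forward(self, x):
        return self.dec(self.enc(x))

voxels = torch.rand(1, 1, 64, 64, 64)   # a voxelized point cloud (assumed size)
recon = VoxelAE()(voxels)
assert recon.shape == voxels.shape
```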
We then modified our model parameters using the experimental data to further enhance the compression performance. Based on the experimental results, we found that the neural network with a 4-layer, stride-2 parameter configuration using the Sigmoid activation function outperforms the default configuration by 208% in terms of the compression-distortion rate. The experimental results show that our findings are effective and universal and make a great contribution to research on point-cloud compression using convolutional neural networks.}, } @article {pmid36850846, year = {2023}, author = {Liu, S and Yang, S and Zhang, H and Wu, W}, title = {A Federated Learning and Deep Reinforcement Learning-Based Method with Two Types of Agents for Computation Offload.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {4}, pages = {}, pmid = {36850846}, issn = {1424-8220}, support = {62002279//National Natural Science Foundation of China/ ; 2020JQ-077//Natural Science Basic Research Program of Shaanxi/ ; ZR2021LZH009//Shandong Provincial Natural Science Foundation/ ; }, abstract = {With the rise of latency-sensitive and computationally intensive applications in mobile edge computing (MEC) environments, the computation offloading strategy has been widely studied to meet the low-latency demands of these applications. However, the uncertainty of various tasks and the time-varying conditions of wireless networks make it difficult for mobile devices to make efficient decisions. The existing methods also face the problems of long-delay decisions and user data privacy disclosures. In this paper, we present FDRT, a federated learning and deep reinforcement learning-based method with two types of agents for computation offloading, to minimize system latency. FDRT uses a multi-agent collaborative computation offloading strategy, namely, DRT. DRT divides the offloading decision into whether to compute tasks locally and whether to offload tasks to MEC servers. The designed DDQN agent considers the task information, its own resources, and the network status conditions of mobile devices, and the designed D3QN agent considers these conditions of all MEC servers in the collaborative cloud-side end MEC system; both jointly learn the optimal decision. FDRT also applies federated learning to reduce communication overhead and optimize the model training of DRT by designing a new parameter aggregation method, while protecting user data privacy. The simulation results showed that DRT effectively reduced the average task execution delay by up to 50% compared with several baselines and state-of-the-art offloading strategies. FDRT also accelerates the convergence rate of multi-agent training and reduces the training time of DRT by 61.7%.}, } @article {pmid36850813, year = {2023}, author = {Vaño, R and Lacalle, I and Sowiński, P and S-Julián, R and Palau, CE}, title = {Cloud-Native Workload Orchestration at the Edge: A Deployment Review and Future Directions.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {4}, pages = {}, pmid = {36850813}, issn = {1424-8220}, support = {101069732//European Commission/ ; }, abstract = {Cloud-native computing principles such as virtualization and orchestration are key to transitioning to the promising paradigm of edge computing. Challenges of containerization, operative models and the scarce availability of established tools make a thorough review indispensable.
Therefore, the authors have described the practical methods and tools found in the literature as well as in current community-led development projects, and have thoroughly laid out the future directions of the field. Container virtualization and its orchestration through Kubernetes have dominated the cloud computing domain, while major efforts have recently been recorded focused on the adaptation of these technologies to the edge. Such initiatives have addressed either the reduction of container engines and the development of specific tailored operating systems or the development of smaller K8s distributions and edge-focused adaptations (such as KubeEdge). Finally, new workload virtualization approaches, such as WebAssembly modules, together with the joint orchestration of these heterogeneous workloads, seem to be the topics to pay attention to in the short to medium term.}, } @article {pmid36850794, year = {2023}, author = {Lee, S}, title = {Distributed Detection of Malicious Android Apps While Preserving Privacy Using Federated Learning.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {4}, pages = {}, pmid = {36850794}, issn = {1424-8220}, support = {2020R1F1A1063942//National Research Foundation of Korea/ ; }, abstract = {Recently, deep learning has been widely used to solve existing computing problems through large-scale data mining. Conventional training of the deep learning model is performed on a central (cloud) server that is equipped with high computing power, by integrating data via high computational intensity. However, integrating raw data from multiple clients raises privacy concerns that are receiving increasing attention. In federated learning (FL), clients train deep learning models in a distributed fashion using their local data; instead of sending raw data to a central server, they send the parameter values of the trained local model to a central server for integration. Because FL does not transmit raw data to the outside, it is free from privacy issues. In this paper, we perform an experimental study that explores the dynamics of the FL-based Android malicious app detection method under three data distributions across clients, i.e., (i) independent and identically distributed (IID), (ii) non-IID, and (iii) non-IID and unbalanced. Our experiments demonstrate that the application of FL is feasible and efficient in detecting malicious Android apps in a distributed manner on cellular networks.}, } @article {pmid36850785, year = {2023}, author = {Chang, RC and Wang, CY and Li, YH and Chiu, CD}, title = {Design of Low-Complexity Convolutional Neural Network Accelerator for Finger Vein Identification System.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {4}, pages = {}, pmid = {36850785}, issn = {1424-8220}, support = {111-2218-E-005-008//National Science and Technology Council of Taiwan, R.O.C./ ; }, mesh = {Humans ; *Neural Networks, Computer ; *Algorithms ; Biometry ; Extremities ; Laboratories ; }, abstract = {In the biometric field, vein identification is a vital process that is constrained by the invisibility of veins as well as other unique features. Moreover, users generally do not wish to have their personal information uploaded to the cloud, so edge computing has become popular for the sake of protecting user privacy. In this paper, we propose a low-complexity and lightweight convolutional neural network (CNN) and design intellectual property (IP) for shortening the inference time in finger vein recognition.
This neural network system can operate independently in client mode. After fetching the user's finger vein image via a near-infrared (NIR) camera mounted on an embedded system, vein features can be efficiently extracted by vein curving algorithms and user identification can be completed quickly. Better image quality and higher recognition accuracy can be obtained by combining several preprocessing techniques and the modified CNN. Experimental data were collected by the finger vein image capture equipment developed in our laboratory based on the specifications of similar products currently on the market. Extensive experiments demonstrated the practicality and robustness of the proposed finger vein identification system.}, } @article {pmid36850784, year = {2023}, author = {Mohamed, AA and Abualigah, L and Alburaikan, A and Khalifa, HAE}, title = {AOEHO: A New Hybrid Data Replication Method in Fog Computing for IoT Application.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {4}, pages = {}, pmid = {36850784}, issn = {1424-8220}, abstract = {Recently, the concept of the internet of things and its services has emerged with cloud computing. Cloud computing is a modern technology for dealing with big data to perform specified operations. The cloud addresses the problem of selecting and placing replicas across nodes in fog computing. Previous studies focused on original swarm intelligence and mathematical models; thus, we propose a novel hybrid method based on two modern metaheuristic algorithms. This paper combines the Aquila Optimizer (AO) algorithm with elephant herding optimization (EHO) for solving dynamic data replication problems in the fog computing environment. In the proposed method, we present a set of objectives that determine data transmission paths, choose the least-cost path, reduce network bottlenecks and bandwidth usage, balance load, and speed up data transfer rates between nodes in cloud computing. The hybrid method, AOEHO, addresses the optimal and least expensive path, determines the best replication via cloud computing, and determines optimal nodes to select and place data replication near users. Moreover, we developed a multi-objective optimization based on the proposed AOEHO to decrease bandwidth usage and enhance load balancing and cloud throughput. The proposed method is evaluated based on data replication using seven criteria. These criteria are data replication access, distance, costs, availability, SBER, popularity, and the Floyd algorithm. The experimental results show the superiority of the proposed AOEHO strategy over other algorithms in terms of bandwidth, distance, load balancing, data transmission, and least-cost path.}, } @article {pmid36850763, year = {2023}, author = {da Silva, JCF and Silva, MC and Luz, EJS and Delabrida, S and Oliveira, RAR}, title = {Using Mobile Edge AI to Detect and Map Diseases in Citrus Orchards.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {4}, pages = {}, pmid = {36850763}, issn = {1424-8220}, mesh = {*Agriculture ; Algorithms ; *Citrus ; Artificial Intelligence ; }, abstract = {Deep Learning models have presented promising results when applied to Agriculture 4.0. Among other applications, these models can be used in disease detection and fruit counting. Deep Learning models usually have many layers in the architecture and millions of parameters. This aspect hinders the use of Deep Learning on mobile devices as they require a large amount of processing power for inference.
In addition, the lack of high-quality Internet connectivity in the field impedes the usage of cloud computing, pushing the processing towards edge devices. This work describes the proposal of an edge AI application to detect and map diseases in citrus orchards. The proposed system has low computational demand, enabling the use of low-footprint models for both detection and classification tasks. We initially compared AI algorithms to detect fruits on trees. Specifically, we analyzed and compared YOLO and Faster R-CNN. Then, we studied lean AI models to perform the classification task. In this context, we tested and compared the performance of MobileNetV2, EfficientNetV2-B0, and NASNet-Mobile. In the detection task, YOLO and Faster R-CNN had similar AI performance metrics, but YOLO was significantly faster. In the image classification task, MobileNetV2 and EfficientNetV2-B0 obtained an accuracy of 100%, while NASNet-Mobile reached 98%. As for the timing performance, MobileNetV2 and EfficientNetV2-B0 were the best candidates, while NASNet-Mobile was significantly worse. Furthermore, MobileNetV2 had a 10% better performance than EfficientNetV2-B0. Finally, we provide a method to evaluate the results from these algorithms towards describing the disease spread using statistical parametric models and a genetic algorithm to perform the parameter regression. With these results, we validated the proposed pipeline, enabling the usage of adequate AI models to develop a mobile edge AI solution.}, } @article {pmid36850711, year = {2023}, author = {Chen, Z and Amani, AM and Yu, X and Jalili, M}, title = {Control and Optimisation of Power Grids Using Smart Meter Data: A Review.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {4}, pages = {}, pmid = {36850711}, issn = {1424-8220}, support = {LP180101309//Australian Research Council/ ; }, abstract = {This paper provides a comprehensive review of the applications of smart meters in the control and optimisation of power grids to support a smooth energy transition towards the renewable energy future. Smart grids are becoming more complicated due to the presence of small-scale, low-inertia generators, which are mainly based on intermittent and variable renewable energy resources, and the implementation of electric vehicles (EVs). Optimal and reliable operation of this environment using conventional model-based approaches is very difficult. Advancements in measurement and communication technologies have brought the opportunity of collecting temporal or real-time data from prosumers through Advanced Metering Infrastructure (AMI). Smart metering brings the potential of applying data-driven algorithms for different power system operation and planning services, such as infrastructure sizing and upgrade, and generation forecasting. It can also be used for demand-side management, especially in the presence of new technologies such as EVs, 5G/6G networks and cloud computing. These algorithms face privacy-preserving and cybersecurity challenges that need to be well addressed. This article surveys the state of the art of each of these topics, reviewing applications, challenges and opportunities of using smart meters to address them. It also stipulates the challenges that smart grids present to smart meters and the benefits that smart meters can bring to smart grids.
Furthermore, the paper concludes with some expected future directions and potential research questions for smart meters, smart grids and their interplay.}, } @article {pmid36850688, year = {2023}, author = {Fathy, C and Ali, HM}, title = {A Secure IoT-Based Irrigation System for Precision Agriculture Using the Expeditious Cipher.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {4}, pages = {}, pmid = {36850688}, issn = {1424-8220}, abstract = {Due to the recent advances in the domain of smart agriculture as a result of integrating traditional agriculture and the latest information technologies, including the Internet of Things (IoT), cloud computing, and artificial intelligence (AI), there is an urgent need to address the information security-related issues and challenges in this field. In this article, we propose the integration of lightweight cryptography techniques into the IoT ecosystem for smart agriculture to meet the requirements of resource-constrained IoT devices. Moreover, we investigate the adoption of a lightweight encryption protocol, namely, the Expeditious Cipher (X-cipher), to create a secure channel between the sensing layer and the broker in the Message Queue Telemetry Transport (MQTT) protocol, as well as a secure channel between the broker and its subscribers. Our case study focuses on smart irrigation systems, and the MQTT protocol is deployed as the application messaging protocol in these systems. Smart irrigation strives to decrease the misuse of natural resources by enhancing the efficiency of agricultural irrigation. This secure channel is utilized to eliminate the main security threat in precision agriculture by protecting sensors' published data from eavesdropping and theft, as well as from unauthorized changes to sensitive data that can negatively impact crops' development. In addition, the secure channel protects the irrigation decisions made by the data analytics (DA) entity regarding the irrigation time and the quantity of water that is returned to actuators from any alteration. Performance evaluation of our chosen lightweight encryption protocol revealed an improvement in terms of power consumption, execution time, and required memory usage when compared with the Advanced Encryption Standard (AES). Moreover, the selected lightweight encryption protocol outperforms the PRESENT lightweight encryption protocol in terms of throughput and memory usage.}, } @article {pmid36850563, year = {2023}, author = {Shahid, MA and Alam, MM and Su'ud, MM}, title = {Achieving Reliability in Cloud Computing by a Novel Hybrid Approach.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {4}, pages = {}, pmid = {36850563}, issn = {1424-8220}, abstract = {Cloud computing (CC), with its benefits and opportunities, is among the fastest growing technologies in the computer industry. Cloud computing's challenges include resource allocation, security, quality of service, availability, privacy, data management, performance compatibility, and fault tolerance. Fault tolerance (FT) refers to a system's ability to continue performing its intended task in the presence of defects. Fault-tolerance challenges include heterogeneity and a lack of standards, the need for automation, cloud downtime reliability, consideration of recovery point objectives, recovery time objectives, and cloud workload.
The proposed research includes machine learning (ML) algorithms such as naïve Bayes (NB), library support vector machine (LibSVM), multinomial logistic regression (MLR), sequential minimal optimization (SMO), K-nearest neighbor (KNN), and random forest (RF), as well as a fault-tolerance method known as delta-checkpointing, to achieve higher accuracy, lower fault-prediction error, and reliability. Furthermore, the secondary data were collected from the homonymous, experimental high-performance computing (HPC) system at the Swiss Federal Institute of Technology (ETH), Zurich, and the primary data were generated using virtual machines (VMs) to select the best machine learning classifier. In this article, the secondary and primary data were divided into two split ratios of 80/20 and 70/30, respectively, and 5-fold cross-validation was used to assess accuracy and fault prediction in terms of the true, false, repair, and failure states of virtual machines. The secondary data results show that naïve Bayes performed exceptionally well on CPU-Mem mono and multi blocks, and sequential minimal optimization performed very well on HDD mono and multi blocks in terms of accuracy and fault prediction. For the primary data, random forest performed very well in terms of accuracy and fault prediction but did not have good time complexity. Sequential minimal optimization has good time complexity with only minor differences from random forest in accuracy and fault prediction. We therefore decided to modify sequential minimal optimization. Finally, the modified sequential minimal optimization (MSMO) algorithm with the fault-tolerance delta-checkpointing (D-CP) method is proposed to improve accuracy, fault-prediction error, and reliability in cloud computing.}, } @article {pmid36850350, year = {2023}, author = {Alsokhiry, F and Annuk, A and Mohamed, MA and Marinho, M}, title = {An Innovative Cloud-Fog-Based Smart Grid Scheme for Efficient Resource Utilization.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {4}, pages = {}, pmid = {36850350}, issn = {1424-8220}, abstract = {Smart grids (SGs) enhance the effectiveness, reliability, resilience, and energy-efficient operation of electrical networks. Nonetheless, SGs suffer from big data transactions, which limit their capabilities and can cause delays in optimal operation and management tasks. Therefore, it is clear that a fast and reliable architecture is needed to make big data management in SGs more efficient. This paper assesses the optimal operation of SGs using cloud computing (CC), fog computing, and resource allocation to address the management problem. Technically, big data management makes the SG more efficient if cloud and fog computing (CFC) are integrated. The integration of fog computing (FC) with CC minimizes cloud burden and maximizes resource allocation. The proposed fog layer has three key features: position awareness, low latency, and mobility. Moreover, a CFC-driven framework is proposed to manage data among different agents. In order to make the system more efficient, FC allocates virtual machines (VMs) according to load-balancing techniques. In addition, the present study proposes a hybrid gray wolf differential evolution optimization algorithm (HGWDE) that brings gray wolf optimization (GWO) and improved differential evolution (IDE) together.
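For orientation, the following NumPy sketch shows the canonical grey wolf position update that hybrids such as HGWDE build on; the differential-evolution component of the paper's hybrid is omitted, and the objective function and bounds are toy examples.

```python
# Schematic grey wolf optimizer (GWO) loop: the three best wolves
# (alpha, beta, delta) pull the rest of the pack toward promising regions.
import numpy as np

rng = np.random.default_rng(1)
dim, n_wolves, iters = 10, 20, 100
wolves = rng.uniform(-5, 5, size=(n_wolves, dim))
objective = lambda x: np.sum(x**2, axis=-1)    # toy sphere function

for t in range(iters):
    a = 2.0 * (1 - t / iters)                  # decreases linearly from 2 to 0
    order = np.argsort(objective(wolves))
    alpha, beta, delta = wolves[order[:3]]     # three best wolves lead
    new = np.empty_like(wolves)
    for i, x in enumerate(wolves):
        guided = []
        for leader in (alpha, beta, delta):
            A = 2 * a * rng.random(dim) - a    # exploration/exploitation mix
            C = 2 * rng.random(dim)
            guided.append(leader - A * np.abs(C * leader - x))
        new[i] = np.mean(guided, axis=0)       # average of the three pulls
    wolves = np.clip(new, -5, 5)

print("best value:", objective(wolves).min())
```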
Simulation results obtained in MATLAB verify the efficiency of the suggested algorithm in terms of data transaction rate and computational time. According to the results, the response time of HGWDE is 54 ms, 82.1 ms, and 81.6 ms faster than that of particle swarm optimization (PSO), differential evolution (DE), and GWO, respectively. HGWDE's processing time is 53 ms, 81.2 ms, and 80.6 ms faster than that of PSO, DE, and GWO. Although GWO is a bit more efficient than HGWDE, the difference is not very significant.}, } @article {pmid36847779, year = {2023}, author = {Krog, D and Enghoff, MB and Köhn, C}, title = {A Monte Carlo approach to study the effect of ions on the nucleation of sulfuric acid-water clusters.}, journal = {Journal of computational chemistry}, volume = {44}, number = {13}, pages = {1250-1262}, doi = {10.1002/jcc.27076}, pmid = {36847779}, issn = {1096-987X}, abstract = {The nucleation of sulfuric acid-water clusters is a significant contribution to the formation of aerosols as precursors of cloud condensation nuclei (CCN). Depending on the temperature, there is an interplay between the clustering of particles and their evaporation controlling the efficiency of cluster growth. For typical temperatures in the atmosphere, the evaporation of H2SO4·H2O clusters is more efficient than the clustering of the first, small clusters, and thus their growth is damped at its early stages. Since the evaporation rates of small clusters containing an HSO4- ion are much smaller than those of purely neutral sulfuric acid clusters, such ions can serve as a central body for the further attachment of H2SO4·H2O molecules. We here present an innovative Monte Carlo model to study the growth of aqueous sulfuric acid clusters around central ions. Unlike classical thermodynamic nucleation theory or kinetic models, this model allows tracing individual particles and thus determining properties for each individual particle. As a benchmarking case, we have performed simulations at T = 300 K and a relative humidity of 50%, with dipole and ion concentrations of c_dipole = 5 × 10^8-10^9 cm^-3 and c_ion = 0-10^7 cm^-3. We discuss the runtime of our simulations and present the velocity distribution of ionic clusters, the size distribution of the clusters, as well as the formation rate of clusters with radii R ≥ 0.85 nm. Simulations give reasonable velocity and size distributions, and there is good agreement of the formation rates with previous results, including the relevance of ions for the initial growth of sulfuric acid-water clusters. In conclusion, we present a computational method that allows studying detailed particle properties during the growth of aerosols as precursors of CCN.}, } @article {pmid36846250, year = {2023}, author = {Gustafsson, W and Dórea, FC and Widgren, S and Frössling, J and Vidal, G and Kim, H and Cha, W and Comin, A and Rodriguez Ewerlöf, I and Rosendal, T}, title = {Data workflows and visualization in support of surveillance practice.}, journal = {Frontiers in veterinary science}, volume = {10}, number = {}, pages = {1129863}, pmid = {36846250}, issn = {2297-1769}, abstract = {The Swedish National Veterinary Institute (SVA) is working on implementing reusable and adaptable workflows for epidemiological analysis and dynamic report generation to improve disease surveillance. Important components of this work include data access, the development environment, computational resources and cloud-based management.
The development environment relies on Git for code collaboration and version control, and on the R language for statistical computing and data visualization. The computational resources include both local and cloud-based systems, with automatic workflows managed in the cloud. The workflows are designed to be flexible and adaptable to changing data sources and stakeholder demands, with the ultimate goal of creating a robust infrastructure for the delivery of actionable epidemiological information.}, } @article {pmid36842917, year = {2023}, author = {Johnson, E and Campos-Cerqueira, M and Jumail, A and Yusni, ASA and Salgado-Lynn, M and Fornace, K}, title = {Applications and advances in acoustic monitoring for infectious disease epidemiology.}, journal = {Trends in parasitology}, volume = {39}, number = {5}, pages = {386-399}, doi = {10.1016/j.pt.2023.01.008}, pmid = {36842917}, issn = {1471-5007}, mesh = {Animals ; Humans ; *Ecosystem ; Biodiversity ; Animals, Wild ; Acoustics ; *Communicable Diseases/epidemiology ; }, abstract = {Emerging infectious diseases continue to pose a significant burden on global public health, and there is a critical need to better understand transmission dynamics arising at the interface of human activity and wildlife habitats. Passive acoustic monitoring (PAM), more typically applied to questions of biodiversity and conservation, provides an opportunity to collect and analyse audio data in relative real time and at low cost. Acoustic methods are increasingly accessible, with the expansion of cloud-based computing, low-cost hardware, and machine learning approaches. Paired with purposeful experimental design, acoustic data can complement existing surveillance methods and provide a novel toolkit to investigate the key biological parameters and ecological interactions that underpin infectious disease epidemiology.}, } @article {pmid36842572, year = {2023}, author = {Andaryani, S and Nourani, V and Abbasnejad, H and Koch, J and Stisen, S and Klöve, B and Haghighi, AT}, title = {Spatio-temporal analysis of climate and irrigated vegetation cover changes and their role in lake water level depletion using a pixel-based approach and canonical correlation analysis.}, journal = {The Science of the total environment}, volume = {873}, number = {}, pages = {162326}, doi = {10.1016/j.scitotenv.2023.162326}, pmid = {36842572}, issn = {1879-1026}, abstract = {Lake Urmia, located in northwest Iran, was among the world's largest hypersaline lakes but has now experienced a 7 m decrease in water level, from 1278 m to 1271 m, over 1996 to 2019. There is doubt as to whether the lake's drying is a natural process or a result of human intervention; here, this question is examined using a pixel-based analysis (PBA) approach. A non-parametric Mann-Kendall trend test was applied to a 21-year record (2000-2020) of satellite data products, i.e., temperature, precipitation, snow cover, and irrigated vegetation cover (IVC). The Google Earth Engine (GEE) cloud-computing platform was utilized over 10 sub-basins in three provinces surrounding Lake Urmia to obtain and calculate the products at pixel-based monthly and seasonal scales. Canonical correlation analysis was employed in order to understand the correlation between the variables and the lake water level (LWL). The trend analysis results show significant increases in temperature (from 1 to 2 °C during 2000-2020) over May-September, i.e., in 87-25% of the basin.
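The trend statistics in this entry come from the non-parametric Mann-Kendall test, which can be sketched in a few lines of Python for a single pixel's series (ties are ignored for brevity; the example series is invented):

```python
# Mann-Kendall trend test for one pixel's annual series (no-ties variance).
import math
import numpy as np

def mann_kendall_z(series: np.ndarray) -> float:
    n = len(series)
    # S counts concordant minus discordant pairs over all i < j
    s = sum(np.sign(series[j] - series[i])
            for i in range(n - 1) for j in range(i + 1, n))
    var_s = n * (n - 1) * (2 * n + 5) / 18.0
    if s > 0:
        return (s - 1) / math.sqrt(var_s)
    if s < 0:
        return (s + 1) / math.sqrt(var_s)
    return 0.0

temps = np.array([14.1, 14.3, 14.2, 14.6, 14.8, 14.7, 15.1, 15.0,
                  15.3, 15.2, 15.6, 15.5, 15.9, 16.0, 16.2])
z = mann_kendall_z(temps)
print(f"Z = {z:.2f}  (|Z| > 1.96 -> significant trend at the 5% level)")
```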
However, precipitation has seen an insignificant decrease (from 3 to 9 mm during 2000-2019) in the rainy months (April and May). Snow cover has also decreased and, when compared with precipitation, shows a change in precipitation patterns from snow to rain. IVC has increased significantly in all sub-basins, especially the southern parts of the lake, with the West province making the largest contribution to the development of IVC. The PBA thus underpins, in more detail, the very high contribution of IVC to the drying of the lake, although the contribution of climate change in this matter is also apparent. The development of IVC leads to increased water consumption through evapotranspiration and excess evaporation caused by the storage of water for irrigation. Due to the decreased runoff caused by consumption exceeding the basin's capacity, the lake cannot be fed sufficiently.}, } @article {pmid36832716, year = {2023}, author = {Yuan, L and Wang, Z and Sun, P and Wei, Y}, title = {An Efficient Virtual Machine Consolidation Algorithm for Cloud Computing.}, journal = {Entropy (Basel, Switzerland)}, volume = {25}, number = {2}, pages = {}, pmid = {36832716}, issn = {1099-4300}, abstract = {With the rapid development of the integration of blockchain and IoT, virtual machine consolidation (VMC) has become a hot topic because it can effectively improve the energy efficiency and service quality of cloud computing in the blockchain. Current VMC algorithms are not effective enough because they do not treat the load of the virtual machine (VM) as a time series to be analyzed. Therefore, we propose a VMC algorithm based on load forecasting to improve efficiency. First, we propose a migration VM selection strategy based on load increment prediction, called LIP. Combined with the current load and the load increment, this strategy can effectively improve the accuracy of selecting VMs from overloaded physical machines (PMs). Then, we propose a VM migration point selection strategy based on load sequence prediction, called SIR. We merge VMs with complementary load series into the same PM, effectively improving the stability of the PM load and thereby reducing service level agreement violations (SLAV) and the number of VM migrations due to resource competition on the PM. Finally, we propose an improved virtual machine consolidation (VMC) algorithm based on the load predictions of LIP and SIR. The experimental results show that our VMC algorithm can effectively improve energy efficiency.}, } @article {pmid36832692, year = {2023}, author = {Tsuruyama, T}, title = {Kullback-Leibler Divergence of an Open-Queuing Network of a Cell-Signal-Transduction Cascade.}, journal = {Entropy (Basel, Switzerland)}, volume = {25}, number = {2}, pages = {}, pmid = {36832692}, issn = {1099-4300}, support = {P2013-201//Ministry of Education, Culture, Sports, Science and Technology/ ; }, abstract = {Queuing networks (QNs) are essential models in operations research, with applications in cloud computing and healthcare systems. However, few studies have analyzed the cell's biological signal transduction using QN theory. This study entailed the modeling of signal transduction as an open Jackson's QN (JQN) to theoretically determine cell signal transduction, under the assumption that the signal mediator queues in the cytoplasm and that the mediator is exchanged from one signaling molecule to another through interactions between the signaling molecules. Each signaling molecule was regarded as a network node in the JQN.
The JQN Kullback-Leibler divergence (KLD) was defined using the ratio of the queuing time (λ) to the exchange time (μ), λ/μ. The mitogen-activated protein kinase (MAPK) signal-cascade model was applied, and the KLD rate per signal-transduction period was shown to be conserved when the KLD was maximized. Our experimental study on the MAPK cascade supported this conclusion. This result is similar to the entropy-rate conservation of chemical kinetics and entropy coding reported in our previous studies. Thus, the JQN can be used as a novel framework to analyze signal transduction.}, } @article {pmid36832652, year = {2023}, author = {Chen, D and Zhang, Y}, title = {Diversity-Aware Marine Predators Algorithm for Task Scheduling in Cloud Computing.}, journal = {Entropy (Basel, Switzerland)}, volume = {25}, number = {2}, pages = {}, pmid = {36832652}, issn = {1099-4300}, abstract = {With the increase in cloud users and internet of things (IoT) applications, advanced task scheduling (TS) methods are required to reasonably schedule tasks in cloud computing. This study proposes a diversity-aware marine predators algorithm (DAMPA) for solving TS in cloud computing. In DAMPA, to enhance the ability to avoid premature convergence, predator crowding degree ranking and comprehensive learning strategies were adopted in the second stage to maintain population diversity and thereby inhibit premature convergence. Additionally, a stage-independent control of the stepsize-scaling strategy that uses different control parameters in three stages was designed to balance the exploration and exploitation abilities. Two case experiments were conducted to evaluate the proposed algorithm. Compared with the latest algorithm, in the first case, DAMPA reduced the makespan and energy consumption by up to 21.06% and 23.47%, respectively. In the second case, the makespan and energy consumption were reduced by 34.35% and 38.60% on average, respectively. Meanwhile, the algorithm achieved greater throughput in both cases.}, } @article {pmid36832648, year = {2023}, author = {Liu, Y and Luo, J and Yang, Y and Wang, X and Gheisari, M and Luo, F}, title = {ShrewdAttack: Low Cost High Accuracy Model Extraction.}, journal = {Entropy (Basel, Switzerland)}, volume = {25}, number = {2}, pages = {}, pmid = {36832648}, issn = {1099-4300}, support = {JCYJ20190806142601687//Shenzhen Basic Research (General Project)/ ; GXWD20201230155427003-20200821160539001//Shenzhen Stable Supporting Program (General Project)/ ; PCL2021A02//Peng Cheng Laboratory Project/ ; 2022B1212010005//Guangdong Provincial Key Laboratory of Novel Security Intelligence Technologies/ ; }, abstract = {Machine learning as a service (MLaaS) plays an essential role in the current ecosystem. Enterprises do not need to train models by themselves separately. Instead, they can use well-trained models provided by MLaaS to support business activities. However, such an ecosystem could be threatened by model extraction attacks, in which an attacker steals the functionality of a trained model provided by MLaaS and builds a substitute model locally. In this paper, we propose a model extraction method with low query costs and high accuracy. In particular, we use pre-trained models and task-relevant data to decrease the size of the query data. We use instance selection to reduce the number of query samples. In addition, we divided the query data into two categories, namely low-confidence data and high-confidence data, to reduce the budget and improve accuracy.
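The core extraction loop (query the victim model, keep its labels, and fit a local substitute) can be sketched with scikit-learn; the victim below is a stand-in classifier, not an Azure service, and the query budget is illustrative.

```python
# Toy model-extraction loop: label a query set with the victim's
# predictions, then train a local substitute on those stolen labels.
from sklearn.datasets import make_classification
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression

X, y = make_classification(n_samples=2000, n_features=20, random_state=0)
victim = RandomForestClassifier(random_state=0).fit(X[:1500], y[:1500])

# The attacker only sees the victim's outputs on a small query budget.
X_query = X[1500:1800]                      # 300 queries: a small budget
y_stolen = victim.predict(X_query)          # labels returned by the "API"

substitute = LogisticRegression(max_iter=1000).fit(X_query, y_stolen)

# Agreement rate: how often the substitute replicates the victim.
agreement = (substitute.predict(X[1800:]) == victim.predict(X[1800:])).mean()
print(f"substitution agreement: {agreement:.2%}")
```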
We then conducted attacks on two models provided by Microsoft Azure as our experiments. The results show that our scheme achieves high accuracy at low cost, with the substitute models achieving 96.10% and 95.24% substitution while querying only 7.32% and 5.30% of their training data on the two models, respectively. This new attack approach creates additional security challenges for models deployed on cloud platforms. It raises the need for novel mitigation strategies to secure the models. In future work, generative adversarial networks and model inversion attacks can be used to generate more diverse data to be applied to the attacks.}, } @article {pmid36832632, year = {2023}, author = {Byrne, E and Gnilke, OW and Kliewer, J}, title = {Straggler- and Adversary-Tolerant Secure Distributed Matrix Multiplication Using Polynomial Codes.}, journal = {Entropy (Basel, Switzerland)}, volume = {25}, number = {2}, pages = {}, pmid = {36832632}, issn = {1099-4300}, support = {54584//UCD Seed Funding - Horizon Scanning Scheme/ ; 1815322, 1908756, 2107370//U.S. National Science Foundation grants/ ; }, abstract = {Large matrix multiplications commonly take place in large-scale machine-learning applications. Often, the sheer size of these matrices prevents carrying out the multiplication at a single server. Therefore, these operations are typically offloaded to a distributed computing platform with a master server and a large number of workers in the cloud, operating in parallel. For such distributed platforms, it has recently been shown that coding over the input data matrices can reduce the computational delay by introducing a tolerance against straggling workers, i.e., workers for which execution time significantly lags with respect to the average. In addition to exact recovery, we impose a security constraint on both matrices to be multiplied. Specifically, we assume that workers can collude and eavesdrop on the content of these matrices. For this problem, we introduce a new class of polynomial codes with fewer non-zero coefficients than the degree +1. We provide closed-form expressions for the recovery threshold and show that our construction improves the recovery threshold of existing schemes in the literature, in particular for larger matrix dimensions and a moderate to large number of colluding workers. In the absence of any security constraints, we show that our construction is optimal in terms of recovery threshold.}, } @article {pmid36819757, year = {2023}, author = {Borhani, F and Shafiepour Motlagh, M and Ehsani, AH and Rashidi, Y and Ghahremanloo, M and Amani, M and Moghimi, A}, title = {Current Status and Future Forecast of Short-lived Climate-Forced Ozone in Tehran, Iran, derived from Ground-Based and Satellite Observations.}, journal = {Water, air, and soil pollution}, volume = {234}, number = {2}, pages = {134}, pmid = {36819757}, issn = {0049-6979}, abstract = {In this study, the distribution and alterations of ozone concentrations in Tehran, Iran, in 2021 were investigated. The impacts of precursors (i.e., CO, NO2, and NO) on ozone were examined using the data collected over 12 months (i.e., January 2021 to December 2021) from 21 stations of the Air Quality Control Company (AQCC). The results of monthly heat mapping of tropospheric ozone concentrations indicated the lowest value in December and the highest value in July. The lowest and highest seasonal concentrations were in winter and summer, respectively. Moreover, there was a negative correlation between ozone and its precursors.
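For orientation on the polynomial-codes entry above (pmid 36832632), here is a toy instance of the classic polynomial-code construction that such schemes build on: each worker evaluates encoded blocks of A and B at one point, and any m·n of the returned products determine the full result by interpolation. This is the standard textbook construction, not the paper's new code; the block counts and evaluation points are arbitrary.

```python
import numpy as np

m, n = 2, 2                      # row blocks of A, column blocks of B
A = np.random.rand(4, 3)
B = np.random.rand(3, 4)
A_blk = np.split(A, m, axis=0)   # A0, A1 stacked row-wise
B_blk = np.split(B, n, axis=1)   # B0, B1 side by side

# Encode: worker at point x computes Atilde(x) @ Btilde(x), where
# Atilde(x) = A0 + A1*x and Btilde(x) = B0 + B1*x^m. The product polynomial
# has degree m*n - 1 = 3 and its coefficients are exactly the blocks Aj @ Bk.
xs = [1.0, 2.0, 3.0, 4.0, 5.0]                  # 5 workers, 1 straggler tolerated
def work(x):
    Atil = sum(Ab * x**j for j, Ab in enumerate(A_blk))
    Btil = sum(Bb * x**(k * m) for k, Bb in enumerate(B_blk))
    return Atil @ Btil

results = {x: work(x) for x in xs[:-1]}         # pretend the worker at x=5 straggles

# Decode: interpolate the degree-3 matrix polynomial entrywise from any
# m*n = 4 responses, then read the block products off the coefficients.
pts = sorted(results)
V = np.vander(pts, m * n, increasing=True)      # Vandermonde system in the points
coeffs = np.linalg.solve(V, np.stack([results[x].ravel() for x in pts]))
shape = results[pts[0]].shape
C_blk = [coeffs[j + k * m].reshape(shape) for k in range(n) for j in range(m)]
C = np.block([[C_blk[0], C_blk[2]], [C_blk[1], C_blk[3]]])
assert np.allclose(C, A @ B)                    # recovered despite the straggler
```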
The Inverse Distance Weighting (IDW) method was then implemented to obtain air pollution zoning maps. Then, ozone concentration modeled by the IDW method was compared with the average monthly change of total column density of ozone derived from Sentinel-5 satellite data in the Google Earth Engine (GEE) cloud platform. Good agreement was found despite the challenging conditions to which both the ground-based and satellite measurements were subject. The results obtained from both datasets showed that the west of the city of Tehran had the highest average O3 concentration. This study also forecast the concentrations of ozone precursors and tropospheric ozone for 2022. For this purpose, the Box-Jenkins Seasonal Autoregressive Integrated Moving Average (SARIMA) approach was implemented to predict the monthly air quality parameters. Overall, it was observed that the SARIMA approach was an efficient tool for forecasting air quality. Finally, the results showed that the trends of ozone obtained from terrestrial and satellite observations throughout 2021 were slightly different due to the contribution of the tropospheric ozone precursor concentration and meteorological conditions.}, } @article {pmid36818051, year = {2023}, author = {Stewart, CA and Costa, CM and Wernert, JA and Snapp-Childs, W and Bland, M and Blood, P and Campbell, T and Couvares, P and Fischer, J and Hancock, DY and Hart, DL and Jankowski, H and Knepper, R and McMullen, DF and Mehringer, S and Pierce, M and Rogers, G and Sinkovits, RS and Towns, J}, title = {Use of accounting concepts to study research: return on investment in XSEDE, a US cyberinfrastructure service.}, journal = {Scientometrics}, volume = {128}, number = {6}, pages = {3225-3255}, pmid = {36818051}, issn = {0138-9130}, abstract = {This paper uses accounting concepts-particularly the concept of Return on Investment (ROI)-to reveal the quantitative value of scientific research pertaining to a major US cyberinfrastructure project (XSEDE-the eXtreme Science and Engineering Discovery Environment). XSEDE provides operational and support services for advanced information technology systems, cloud systems, and supercomputers supporting non-classified US research, with an average budget for XSEDE of US$20M+ per year over the period studied (2014-2021). To assess the financial effectiveness of these services, we calculated a proxy for ROI, and converted quantitative measures of XSEDE service delivery into financial values using costs for service from the US marketplace. We calculated two estimates of ROI: a Conservative Estimate, functioning as a lower bound and using publicly available data for a lower valuation of XSEDE services; and a Best Available Estimate, functioning as a more accurate estimate, but using some unpublished valuation data. Using the largest dataset assembled for analysis of ROI for a cyberinfrastructure project, we found a Conservative Estimate of ROI of 1.87, and a Best Available Estimate of ROI of 3.24.
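The zoning maps in the ozone entry above (pmid 36819757) rest on plain inverse distance weighting, which is compact enough to sketch. The station coordinates, readings, and power parameter below are made up for illustration.

```python
import numpy as np

def idw(stations, values, query, power=2.0, eps=1e-12):
    """Inverse Distance Weighting: a query point's value is the average of
    station values weighted by 1 / distance**power."""
    d = np.linalg.norm(stations - query, axis=1)
    if d.min() < eps:                      # query coincides with a station
        return float(values[d.argmin()])
    w = 1.0 / d**power
    return float(np.dot(w, values) / w.sum())

stations = np.array([[0.0, 0.0], [1.0, 0.0], [0.0, 1.0], [1.0, 1.0]])
ozone = np.array([30.0, 42.0, 35.0, 50.0])     # hypothetical station readings
print(idw(stations, ozone, np.array([0.25, 0.25])))
```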
Through accounting methods, we show that XSEDE services offer excellent value to the US government, that the services offered uniquely by XSEDE (that is, not otherwise available for purchase) were the most valuable to the facilitation of US research activities, and that accounting-based concepts hold great value for understanding the mechanisms of scientific research generally.}, } @article {pmid36812648, year = {2022}, author = {Jiang, P and Gao, F and Liu, S and Zhang, S and Zhang, X and Xia, Z and Zhang, W and Jiang, T and Zhu, JL and Zhang, Z and Shu, Q and Snyder, M and Li, J}, title = {Longitudinally tracking personal physiomes for precision management of childhood epilepsy.}, journal = {PLOS digital health}, volume = {1}, number = {12}, pages = {e0000161}, pmid = {36812648}, issn = {2767-3170}, abstract = {Our current understanding of human physiology and activities is largely derived from sparse and discrete individual clinical measurements. To achieve precise, proactive, and effective health management of an individual, longitudinal and dense tracking of personal physiomes and activities is required, which is only feasible by utilizing wearable biosensors. As a pilot study, we implemented a cloud computing infrastructure to integrate wearable sensors, mobile computing, digital signal processing, and machine learning to improve early detection of seizure onsets in children. We recruited 99 children diagnosed with epilepsy and longitudinally tracked them at single-second resolution using a wearable wristband, and prospectively acquired more than one billion data points. This unique dataset offered us an opportunity to quantify physiological dynamics (e.g., heart rate, stress response) across age groups and to identify physiological irregularities upon epilepsy onset. The high-dimensional personal physiome and activity profiles displayed a clustering pattern anchored by patient age groups. These signatory patterns included strong age- and sex-specific effects on varying circadian rhythms and stress responses across major childhood developmental stages. For each patient, we further compared the physiological and activity profiles associated with seizure onsets with the personal baseline and developed a machine learning framework to accurately capture these onset moments. The performance of this framework was further replicated in another independent patient cohort. We next referenced our predictions with the electroencephalogram (EEG) signals on selected patients and demonstrated that our approach could detect subtle seizures not recognized by humans and could detect seizures prior to clinical onset. Our work demonstrated the feasibility of a real-time mobile infrastructure in a clinical setting, which has the potential to be valuable in caring for epileptic patients.
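The onset-detection step in the epilepsy entry above (pmid 36812648) compares physiology against a personal baseline. As a deliberately simplified stand-in for the paper's machine learning framework, the sketch below flags heart-rate samples more than k standard deviations from a rolling baseline; the window length and threshold are illustrative.

```python
import numpy as np

def flag_deviations(hr, window=60, k=3.0):
    """Flag samples deviating from a rolling personal baseline by > k sigma.
    hr: 1 Hz heart-rate series (one sample per second)."""
    flags = np.zeros(len(hr), dtype=bool)
    for t in range(window, len(hr)):
        base = hr[t - window:t]
        mu, sd = base.mean(), base.std()
        if sd > 0 and abs(hr[t] - mu) > k * sd:
            flags[t] = True
    return flags

rng = np.random.default_rng(0)
hr = rng.normal(90, 2, 300)      # synthetic resting heart rate
hr[200:210] += 25                # injected tachycardic excursion
print(np.flatnonzero(flag_deviations(hr))[:5])   # flags appear near t = 200
```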
Extension of such a system has the potential to be leveraged as a health management device or longitudinal phenotyping tool in clinical cohort studies.}, } @article {pmid36812592, year = {2023}, author = {Tabata, K and Mihara, H and Nanjo, S and Motoo, I and Ando, T and Teramoto, A and Fujinami, H and Yasuda, I}, title = {Artificial intelligence model for analyzing colonic endoscopy images to detect changes associated with irritable bowel syndrome.}, journal = {PLOS digital health}, volume = {2}, number = {2}, pages = {e0000058}, pmid = {36812592}, issn = {2767-3170}, abstract = {IBS is not considered to be an organic disease and usually shows no abnormality on lower gastrointestinal endoscopy, although biofilm formation, dysbiosis, and histological microinflammation have recently been reported in patients with IBS. In this study, we investigated whether an artificial intelligence (AI) colorectal image model can identify minute endoscopic changes, which cannot typically be detected by human investigators, that are associated with IBS. Study subjects were identified based on electronic medical records and categorized as IBS (Group I; n = 11), IBS with predominant constipation (IBS-C; Group C; n = 12), and IBS with predominant diarrhea (IBS-D; Group D; n = 12). The study subjects had no other diseases. Colonoscopy images from IBS patients and from asymptomatic healthy subjects (Group N; n = 88) were obtained. Google Cloud Platform AutoML Vision (single-label classification) was used to construct AI image models to calculate sensitivity, specificity, predictive value, and AUC. A total of 2479, 382, 538, and 484 images were randomly selected for Groups N, I, C and D, respectively. The AUC of the model discriminating between Group N and I was 0.95. Sensitivity, specificity, positive predictive value, and negative predictive value of Group I detection were 30.8%, 97.6%, 66.7%, and 90.2%, respectively. The overall AUC of the model discriminating between Groups N, C, and D was 0.83; sensitivity, specificity, and positive predictive value of Group N were 87.5%, 46.2%, and 79.9%, respectively. Using the image AI model, colonoscopy images of IBS could be discriminated from healthy subjects at AUC 0.95. Prospective studies are needed to further validate whether this externally validated model has similar diagnostic capabilities at other facilities and whether it can be used to determine treatment efficacy.}, } @article {pmid36805192, year = {2023}, author = {Brinkhaus, HO and Rajan, K and Schaub, J and Zielesny, A and Steinbeck, C}, title = {Open data and algorithms for open science in AI-driven molecular informatics.}, journal = {Current opinion in structural biology}, volume = {79}, number = {}, pages = {102542}, doi = {10.1016/j.sbi.2023.102542}, pmid = {36805192}, issn = {1879-033X}, mesh = {*Artificial Intelligence ; *Machine Learning ; Algorithms ; Software ; Informatics ; }, abstract = {Recent years have seen a sharp increase in the development of deep learning and artificial intelligence-based molecular informatics. There has been a growing interest in applying deep learning to several subfields, including the digital transformation of synthetic chemistry, extraction of chemical information from the scientific literature, and AI in natural product-based drug discovery. The application of AI to molecular informatics is still constrained by the fact that most of the data used for training and testing deep learning models are not available as FAIR and open data. 
As open science practices continue to grow in popularity, initiatives which support FAIR and open data as well as open-source software have emerged. It is becoming increasingly important for researchers in the field of molecular informatics to embrace open science and to submit data and software in open repositories. With the advent of open-source deep learning frameworks and cloud computing platforms, academic researchers are now able to deploy and test their own deep learning models with ease. With the development of new and faster hardware for deep learning and the increasing number of initiatives towards digital research data management infrastructures, as well as a culture promoting open data, open source, and open science, AI-driven molecular informatics will continue to grow. This review examines the current state of open data and open algorithms in molecular informatics, as well as ways in which they could be improved in future.}, } @article {pmid36797269, year = {2023}, author = {Lall, A and Tallur, S}, title = {Deep reinforcement learning-based pairwise DNA sequence alignment method compatible with embedded edge devices.}, journal = {Scientific reports}, volume = {13}, number = {1}, pages = {2773}, pmid = {36797269}, issn = {2045-2322}, mesh = {Sequence Alignment ; *Algorithms ; *Neural Networks, Computer ; Computers ; DNA ; }, abstract = {Sequence alignment is an essential component of bioinformatics, for identifying regions of similarity that may indicate functional, structural, or evolutionary relationships between the sequences. Genome-based diagnostics relying on DNA sequencing have benefited hugely from the boom in computing power in recent decades, particularly due to cloud-computing and the rise of graphics processing units (GPUs) and other advanced computing platforms for running advanced algorithms. Translating the success of such breakthroughs in diagnostics to affordable solutions for low-cost healthcare requires development of algorithms that can operate on the edge instead of in the cloud, using low-cost and low-power electronic systems such as microcontrollers and field programmable gate arrays (FPGAs). In this work, we present EdgeAlign, a deep reinforcement learning based method for performing pairwise DNA sequence alignment on stand-alone edge devices. EdgeAlign uses deep reinforcement learning to train a deep Q-network (DQN) agent for performing sequence alignment on fixed length sub-sequences, using a sliding window that is scanned over the length of the entire sequence. The hardware resource-consumption for implementing this scheme is thus independent of the lengths of the sequences to be aligned, and is further optimized using a novel AutoML based method for neural network model size reduction. 
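The fixed-window idea in the EdgeAlign entry above (pmid 36797269) is what keeps hardware cost independent of sequence length. The sketch below pairs that windowing scheme with a plain Needleman-Wunsch scorer in place of the paper's DQN agent; the window size and step are illustrative.

```python
def nw_score(a, b, match=1, mismatch=-1, gap=-2):
    """Needleman-Wunsch global alignment score via dynamic programming,
    keeping only the previous row to stay memory-light."""
    prev = [j * gap for j in range(len(b) + 1)]
    for i, ca in enumerate(a, 1):
        cur = [i * gap]
        for j, cb in enumerate(b, 1):
            cur.append(max(prev[j - 1] + (match if ca == cb else mismatch),
                           prev[j] + gap,      # gap in b
                           cur[-1] + gap))     # gap in a
        prev = cur
    return prev[-1]

def windowed_alignment(seq1, seq2, win=8, step=4):
    """Score fixed-length windows scanned along both sequences, so resource
    use is independent of total sequence length (the property EdgeAlign exploits)."""
    return [nw_score(seq1[off:off + win], seq2[off:off + win])
            for off in range(0, min(len(seq1), len(seq2)) - win + 1, step)]

print(windowed_alignment("ACGTACGTACGTACGT", "ACGTTCGTACGAACGT"))
```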
Unlike other sequence alignment algorithms reported in the literature, the model demonstrated in this work is highly compact and is deployed on two edge devices (NVIDIA Jetson Nano Developer Kit and Digilent Arty A7-100T, containing Xilinx XC7A35T Artix-7 FPGA) to demonstrate alignment of sequences from the publicly available Influenza collection at the National Center for Biotechnology Information (NCBI) Virus Data Hub.}, } @article {pmid36793418, year = {2023}, author = {A, A and Dahan, F and Alroobaea, R and Alghamdi, WY and Mustafa Khaja Mohammed, and Hajjej, F and Deema Mohammed Alsekait, and Raahemifar, K}, title = {A smart IoMT based architecture for E-healthcare patient monitoring system using artificial intelligence algorithms.}, journal = {Frontiers in physiology}, volume = {14}, number = {}, pages = {1125952}, pmid = {36793418}, issn = {1664-042X}, abstract = {Cloud computing is commonly integrated with wireless sensor networks to enable monitoring systems and improve quality of service. Patient data are monitored with biosensors regardless of the patient data type, which reduces the workload of hospitals and physicians. Wearable sensor devices and the Internet of Medical Things (IoMT) have changed the health service, resulting in faster monitoring, prediction, diagnosis, and treatment. Nevertheless, there have been difficulties that need to be resolved by the use of AI methods. The primary goal of this study is to introduce an AI-powered, IoMT telemedicine infrastructure for E-healthcare. In this paper, data are first collected from the patient's body using sensing devices, transmitted through a gateway/Wi-Fi, and stored in an IoMT cloud repository. The stored information is then retrieved and preprocessed to refine the collected data. Features are extracted from the preprocessed data by means of high-dimensional linear discriminant analysis (LDA), and the optimal features are selected using a reconfigured multi-objective cuckoo search algorithm (CSA). Abnormal/normal data are predicted using a hybrid ResNet-18 and GoogleNet classifier (HRGC), and a decision is then made on whether to alert hospitals/healthcare personnel. If the expected results are satisfactory, the participant information is saved online for later use. Finally, a performance analysis is carried out to validate the efficiency of the proposed mechanism.}, } @article {pmid36789435, year = {2023}, author = {Camacho, C and Boratyn, GM and Joukov, V and Alvarez, RV and Madden, TL}, title = {ElasticBLAST: Accelerating Sequence Search via Cloud Computing.}, journal = {bioRxiv : the preprint server for biology}, volume = {}, number = {}, pages = {}, pmid = {36789435}, issn = {2692-8205}, abstract = {BACKGROUND: Biomedical researchers use alignments produced by BLAST (Basic Local Alignment Search Tool) to categorize their query sequences. Producing such alignments is an essential bioinformatics task that is well suited for the cloud. The cloud can perform many calculations quickly as well as store and access large volumes of data. Bioinformaticians can also use it to collaborate with other researchers, sharing their results, datasets and even their pipelines on a common platform.

RESULTS: We present ElasticBLAST, a cloud native application to perform BLAST alignments in the cloud. ElasticBLAST can handle anywhere from a few to many thousands of queries and run the searches on thousands of virtual CPUs (if desired), deleting resources when it is done. It uses cloud native tools for orchestration and can request discounted instances, lowering cloud costs for users. It is supported on Amazon Web Services and Google Cloud Platform. It can search BLAST databases that are user provided or from the National Center for Biotechnology Information.
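ElasticBLAST itself is driven from a command line, so rather than guess at its interface, the sketch below illustrates only the underlying pattern it automates: splitting a query FASTA file into fixed-size batches that a cloud scheduler can fan out, one batch per worker. The batch size and the submit_job hook are hypothetical.

```python
def fasta_batches(path, batch_size=100):
    """Yield lists of FASTA records (header, sequence) in fixed-size batches,
    the unit of work a cloud scheduler can hand to one worker each."""
    batch, header, seq = [], None, []
    with open(path) as fh:
        for line in fh:
            line = line.rstrip()
            if line.startswith(">"):
                if header is not None:
                    batch.append((header, "".join(seq)))
                    if len(batch) == batch_size:
                        yield batch
                        batch = []
                header, seq = line, []
            else:
                seq.append(line)
        if header is not None:
            batch.append((header, "".join(seq)))
    if batch:
        yield batch

# Each batch would become one cloud job, e.g. a container running blastn.
# for i, b in enumerate(fasta_batches("queries.fa")):
#     submit_job(f"batch-{i}", b)   # submit_job is a hypothetical scheduler hook
```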

CONCLUSION: We show that ElasticBLAST is a useful application that can efficiently perform BLAST searches for the user in the cloud, as we demonstrate with two examples. At the same time, it hides much of the complexity of working in the cloud, lowering the threshold to move work to the cloud.}, } @article {pmid36789367, year = {2023}, author = {Guo, YG and Yin, Q and Wang, Y and Xu, J and Zhu, L}, title = {Efficiency and optimization of government service resource allocation in a cloud computing environment.}, journal = {Journal of cloud computing (Heidelberg, Germany)}, volume = {12}, number = {1}, pages = {18}, pmid = {36789367}, issn = {2192-113X}, abstract = {According to the connotation and structure of government service resources, data of government service resources in L city from 2019 to 2021 are used to calculate the efficiency of government service resource allocation in each county and region in different periods, particularly by adding the government cloud platform and cloud computing resources to the government service resource data and applying the data envelopment analysis (DEA) method, which has practical significance for the development and innovation of government services. On this basis, patterns and evolutionary trends of government service resource allocation efficiency in each region during the study period are analyzed and discussed. Results are as follows. i) The overall efficiency level in the allocation of government service resources in L city is not high, showing an annually increasing trend with fluctuations between highs and lows. ii) Relative differences in the allocation efficiency of government service resources are a common phenomenon of regional development; their existence and evolution directly or indirectly reflect various factors, such as economic strength and reform effort. iii) Data analysis for the specific points indicates that increased input does not necessarily lead to increased efficiency; some indicators have insufficient input or redundant output. Therefore, optimizing the physical, human, and financial resource allocation methods, together with the intelligent online processing of government services enabled by the adoption of a government cloud platform and cloud computing resources, is currently the objective choice for maximizing the efficiency of government service resource allocation.}, } @article {pmid36788990, year = {2023}, author = {Shrestha, S and Stapp, J and Taylor, M and Leach, R and Carreiro, S and Indic, P}, title = {Towards Device Agnostic Detection of Stress and Craving in Patients with Substance Use Disorder.}, journal = {Proceedings of the ... Annual Hawaii International Conference on System Sciences. Annual Hawaii International Conference on System Sciences}, volume = {2023}, number = {}, pages = {3156-3163}, pmid = {36788990}, issn = {1530-1605}, support = {R44 DA046151/DA/NIDA NIH HHS/United States ; }, abstract = {Novel technologies have great potential to improve the treatment of individuals with substance use disorder (SUD) and to reduce the current high rate of relapse (i.e. return to drug use). Wearable sensor-based systems that continuously measure physiology can provide information about behavior and opportunities for real-time interventions. We have previously developed an mHealth system which includes a wearable sensor, a mobile phone app, and a cloud-based server with embedded machine learning algorithms which detect stress and craving.
The system functions as a just-in-time intervention tool to help patients de-escalate and as a tool for clinicians to tailor treatment based on stress and craving patterns observed. However, in our pilot work we found that to deploy the system to diverse socioeconomic populations and to increase usability, the system must be able to work efficiently with cost-effective and popular commercial wearable devices. To make the system device agnostic, we propose methods to transform data from a commercially available wearable for use in algorithms developed with a research-grade wearable sensor. The accuracy of these transformations in detecting stress and craving in individuals with SUD is further explored.}, } @article {pmid36785195, year = {2023}, author = {Zhao, Y and Bu, JW and Liu, W and Ji, JH and Yang, QH and Lin, SF}, title = {Implementation of a full-color holographic system using RGB-D salient object detection and divided point cloud gridding.}, journal = {Optics express}, volume = {31}, number = {2}, pages = {1641-1655}, doi = {10.1364/OE.477666}, pmid = {36785195}, issn = {1094-4087}, abstract = {At present, a real objects-based full-color holographic system usually uses a digital single-lens reflex (DSLR) camera array or depth camera to collect data. It then relies on a spatial light modulator to modulate the input light source for the reconstruction of the 3-D scene of the real objects. However, the main challenges facing high-quality holographic 3-D display are limited generation speed and the low accuracy of computer-generated holograms. This research generates more effective and accurate point cloud data by developing an RGB-D salient object detection model in the acquisition unit. In addition, a divided point cloud gridding method is proposed to enhance the computing speed of hologram generation. In the RGB channels, we categorized each object point into depth grids with identical depth values. The depth grids are divided into M × N parts, and only the effective parts will be calculated. Compared with traditional methods, the calculation time is dramatically reduced. The feasibility of our proposed approach is established through experiments.}, } @article {pmid36776787, year = {2023}, author = {Zahid, MA and Shafiq, B and Vaidya, J and Afzal, A and Shamail, S}, title = {Collaborative Business Process Fault Resolution in the Services Cloud.}, journal = {IEEE transactions on services computing}, volume = {16}, number = {1}, pages = {162-176}, doi = {10.1109/tsc.2021.3112525}, pmid = {36776787}, issn = {1939-1374}, support = {R01 GM118574/GM/NIGMS NIH HHS/United States ; R35 GM134927/GM/NIGMS NIH HHS/United States ; }, abstract = {The emergence of cloud and edge computing has enabled rapid development and deployment of Internet-centric distributed applications. There are many platforms and tools that help users develop distributed business process (BP) applications by composing relevant service components in a plug and play manner. However, there is no guarantee that a BP application developed in this way is fault-free. In this paper, we formalize the problem of collaborative BP fault resolution, which aims to utilize information from existing fault-free BPs that use similar services to resolve faults in a user-developed BP. We present an approach based on association analysis of pairwise transformations between a faulty BP and existing BPs to identify the smallest possible set of transformations to resolve the fault(s) in the user-developed BP.
An extensive experimental evaluation over both synthetically generated faulty BPs and real BPs developed by users shows the effectiveness of our approach.}, } @article {pmid36761837, year = {2022}, author = {Tercan, B and Qin, G and Kim, TK and Aguilar, B and Phan, J and Longabaugh, W and Pot, D and Kemp, CJ and Chambwe, N and Shmulevich, I}, title = {SL-Cloud: A Cloud-based resource to support synthetic lethal interaction discovery.}, journal = {F1000Research}, volume = {11}, number = {}, pages = {493}, pmid = {36761837}, issn = {2046-1402}, support = {U01 CA217883/CA/NCI NIH HHS/United States ; P01 CA077852/CA/NCI NIH HHS/United States ; HHSN261201500003I/CA/NCI NIH HHS/United States ; HHSN261201400008C/CA/NCI NIH HHS/United States ; }, mesh = {Humans ; *Cloud Computing ; *Neoplasms/genetics ; Systems Biology ; Multiomics ; }, abstract = {Synthetic lethal interactions (SLIs), genetic interactions in which the simultaneous inactivation of two genes leads to a lethal phenotype, are promising targets for therapeutic intervention in cancer, as exemplified by the recent success of PARP inhibitors in treating BRCA1/2-deficient tumors. We present SL-Cloud, a new component of the Institute for Systems Biology Cancer Gateway in the Cloud (ISB-CGC), that provides an integrated framework of cloud-hosted data resources and curated workflows to enable facile prediction of SLIs. This resource addresses two main challenges related to SLI inference: the need to wrangle and preprocess large multi-omic datasets and the availability of multiple comparable prediction approaches. SL-Cloud enables customizable computational inference of SLIs and testing of prediction approaches across multiple datasets. We anticipate that cancer researchers will find utility in this tool for discovery of SLIs to support further investigation into potential drug targets for anticancer therapies.}, } @article {pmid36772751, year = {2023}, author = {Gayathri, R and Usharani, S and Mahdal, M and Vezhavendhan, R and Vincent, R and Rajesh, M and Elangovan, M}, title = {Detection and Mitigation of IoT-Based Attacks Using SNMP and Moving Target Defense Techniques.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {3}, pages = {}, pmid = {36772751}, issn = {1424-8220}, support = {SP2022/60//AEC (Czechia)/ ; }, abstract = {This paper proposes a solution for ensuring the security of IoT devices in the cloud environment by protecting against distributed denial-of-service (DDoS) and false data injection attacks. The proposed solution is based on the integration of simple network management protocol (SNMP), Kullback-Leibler distance (KLD), access control rules (ACL), and moving target defense (MTD) techniques. The SNMP and KLD techniques are used to detect DDoS and false data sharing attacks, while the ACL and MTD techniques are applied to mitigate these attacks by hardening the target and reducing the attack surface. The effectiveness of the proposed framework is validated through experimental simulations on the Amazon Web Service (AWS) platform, which shows a significant reduction in attack probabilities and delays. The integration of IoT and cloud technologies is a powerful combination that can deliver customized and critical solutions to major business vendors. However, ensuring the confidentiality and security of data among IoT devices, storage, and access to the cloud is crucial to maintaining trust among internet users. 
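The detection half of the SNMP/KLD entry above (pmid 36772751) reduces to comparing traffic distributions. A minimal sketch, assuming per-interval packet counts are binned into histograms and an interval is flagged when its KL divergence from a learned baseline exceeds a threshold; the bins, counts, and threshold are made up.

```python
import numpy as np

def kld(p, q, eps=1e-9):
    """Discrete Kullback-Leibler divergence D(p || q), with smoothing so
    empty bins do not produce infinities."""
    p = (p + eps) / (p + eps).sum()
    q = (q + eps) / (q + eps).sum()
    return float(np.sum(p * np.log(p / q)))

def is_anomalous(counts, baseline_counts, threshold=0.5):
    """Flag a traffic interval whose packet histogram diverges from the
    baseline histogram by more than `threshold` nats."""
    return kld(np.asarray(counts, float),
               np.asarray(baseline_counts, float)) > threshold

baseline = [120, 300, 260, 80, 40]        # typical packets per size bucket
quiet = [110, 310, 250, 90, 35]
flood = [2500, 300, 260, 80, 40]          # volumetric spike in one bucket
print(is_anomalous(quiet, baseline), is_anomalous(flood, baseline))  # False True
```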
This paper demonstrates the importance of implementing robust security measures to protect IoT devices in the cloud environment and highlights the potential of the proposed solution in protecting against DDoS and false data injection attacks.}, } @article {pmid36772680, year = {2023}, author = {Bourechak, A and Zedadra, O and Kouahla, MN and Guerrieri, A and Seridi, H and Fortino, G}, title = {At the Confluence of Artificial Intelligence and Edge Computing in IoT-Based Applications: A Review and New Perspectives.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {3}, pages = {}, pmid = {36772680}, issn = {1424-8220}, support = {CUP H24I17000070001//Italian MIUR/ ; #101092912//European Union/ ; IR0000013//European Union/ ; }, abstract = {Given its advantages in low latency, fast response, context-aware services, mobility, and privacy preservation, edge computing has emerged as the key support for intelligent applications and 5G/6G Internet of things (IoT) networks. This technology extends the cloud by providing intermediate services at the edge of the network and improving the quality of service for latency-sensitive applications. Many AI-based solutions with machine learning, deep learning, and swarm intelligence have exhibited the high potential to perform intelligent cognitive sensing, intelligent network management, big data analytics, and security enhancement for edge-based smart applications. Despite its many benefits, there are still concerns about the required capabilities of intelligent edge computing to deal with the computational complexity of machine learning techniques for big IoT data analytics. Resource constraints of edge computing, distributed computing, efficient orchestration, and synchronization of resources are all factors that require attention for quality of service improvement and cost-effective development of edge-based smart applications. In this context, this paper aims to explore the confluence of AI and edge in many application domains in order to leverage the potential of the existing research around these factors and identify new perspectives. The confluence of edge computing and AI improves the quality of user experience in emergency situations, such as in the Internet of vehicles, where critical inaccuracies or delays can lead to damage and accidents. These are the same factors that most studies have used to evaluate the success of an edge-based application. In this review, we first provide an in-depth analysis of the state of the art of AI in edge-based applications with a focus on eight application areas: smart agriculture, smart environment, smart grid, smart healthcare, smart industry, smart education, smart transportation, and security and privacy. Then, we present a qualitative comparison that emphasizes the main objective of the confluence, the roles and the use of artificial intelligence at the network edge, and the key enabling technologies for edge analytics. Then, open challenges, future research directions, and perspectives are identified and discussed. 
Finally, some conclusions are drawn.}, } @article {pmid36772662, year = {2023}, author = {Witanto, EN and Stanley, B and Lee, SG}, title = {Distributed Data Integrity Verification Scheme in Multi-Cloud Environment.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {3}, pages = {}, pmid = {36772662}, issn = {1424-8220}, support = {2018R1D1A1B07047601//National Research Foundation of Korea/ ; }, abstract = {Most existing data integrity auditing protocols in cloud storage rely on proof of probabilistic data possession. Consequently, the sampling rate of data integrity verification is kept low to avoid imposing excessive costs on the auditor. However, in the case of a multi-cloud environment, the amount of stored data will be huge. As a result, a higher sampling rate is needed, which in turn increases the auditor's costs. Therefore, this paper proposes a blockchain-based distributed data integrity verification protocol in multi-cloud environments that enables data verification using multiple verifiers. The proposed scheme aims to increase the sampling rate of data verification without increasing the costs significantly. The performance analysis shows that this protocol requires less time for verification tasks with multiple verifiers than with a single verifier. Furthermore, utilizing multiple verifiers also decreases each verifier's computation and communication costs.}, } @article {pmid36772584, year = {2023}, author = {Alexandrescu, A}, title = {Parallel Processing of Sensor Data in a Distributed Rules Engine Environment through Clustering and Data Flow Reconfiguration.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {3}, pages = {}, pmid = {36772584}, issn = {1424-8220}, abstract = {An emerging reality is the development of smart buildings and cities, which improve residents' comfort. These environments employ multiple sensor networks, whose data must be acquired and processed in real time by multiple rule engines, which trigger events that enable specific actuators. The problem is how to handle those data in a scalable manner by using multiple processing instances to maximize the system throughput. This paper considers the types of sensors that are used in these scenarios and proposes a model for abstracting the information flow as a weighted dependency graph. Two parallel computing methods are then proposed for obtaining an efficient data flow: a variation of the parallel k-means clustering algorithm and a custom genetic algorithm. Simulation results show that the two proposed flow reconfiguration algorithms reduce the rule processing times and provide an efficient solution for increasing the scalability of the considered environment. Another aspect being discussed is using an open-source cloud solution to manage the system and how to use the two algorithms to increase efficiency. These methods allow for a seamless increase in the number of sensors in the environment by making smart use of the available resources.}, } @article {pmid36772562, year = {2023}, author = {Kim, SH and Kim, T}, title = {Local Scheduling in KubeEdge-Based Edge Computing Environment.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {3}, pages = {}, pmid = {36772562}, issn = {1424-8220}, support = {NRF-2022R1I1A3072355//National Research Foundation of Korea/ ; }, abstract = {KubeEdge is an open-source platform that orchestrates containerized Internet of Things (IoT) application services in IoT edge computing environments.
Based on Kubernetes, it supports heterogeneous IoT device protocols on edge nodes and provides various functions necessary to build edge computing infrastructure, such as network management between cloud and edge nodes. However, the resulting cloud-based systems are subject to several limitations. In this study, we evaluated the performance of KubeEdge in terms of the computational resource distribution and delay between edge nodes. We found that forwarding traffic between edge nodes degrades the throughput of clusters and causes service delay in edge computing environments. Based on these results, we proposed a local scheduling scheme that handles user traffic locally at each edge node. The performance evaluation results revealed that local scheduling outperforms the existing load-balancing algorithm in the edge computing environment.}, } @article {pmid36772506, year = {2023}, author = {Wang, M and Li, C and Wang, X and Piao, Z and Yang, Y and Dai, W and Zhang, Q}, title = {Research on Comprehensive Evaluation and Early Warning of Transmission Lines' Operation Status Based on Dynamic Cloud Computing.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {3}, pages = {}, pmid = {36772506}, issn = {1424-8220}, support = {20220201075GX.//Science and Technology Department Plan Project of Jilin Province of China/ ; }, abstract = {The current methods for evaluating the operating condition of electricity transmission lines (ETLs) and providing early warning have several problems, such as the low correlation of data, ignoring the influence of seasonal factors, and strong subjectivity. This paper analyses the sensitive factors that influence dynamic key evaluation indices such as grounding resistance, sag, and wire corrosion, establishes the evaluation criteria of the ETL operation state, and proposes five ETL status levels and seven principles for selecting evaluation indices. Nine grade I evaluation indices and twenty-nine grade II evaluation indices, including passageway and meteorological environments, are determined. Cloud model theory is then used to propose an early-warning technique for the operation state of ETLs based on inspection defect parameters and the cloud model. The method is applied to a line in the Baicheng district of Jilin Province, combining its inspection defect parameters with critical evaluation index data, such as grounding resistance, sag, and wire corrosion, which are used to assess the timeliness of the data; the line is then evaluated. The research shows that the dynamic evaluation model is correct and that the ETL status evaluation and early warning method has reasonable practicability.}, } @article {pmid36772424, year = {2023}, author = {Mangalampalli, S and Karri, GR and Elngar, AA}, title = {An Efficient Trust-Aware Task Scheduling Algorithm in Cloud Computing Using Firefly Optimization.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {3}, pages = {}, pmid = {36772424}, issn = {1424-8220}, abstract = {Task scheduling in the cloud computing paradigm poses a challenge for researchers, as the workloads that come onto cloud platforms are dynamic and heterogeneous, and scheduling these heterogeneous tasks to the appropriate virtual resources is difficult. Inappropriate assignment of tasks to virtual resources degrades quality of service, violates SLA metrics, and ultimately erodes the cloud user's trust in the cloud provider.
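The local scheduling scheme in the KubeEdge entry above (pmid 36772562) can be caricatured in a few lines: serve a request on the edge node that received it whenever that node has headroom, and pay the forwarding penalty only when it does not. The node names, loads, capacities, and fallback rule below are invented for illustration, not taken from the paper.

```python
def route(request_node, load, capacity):
    """Local-first scheduling: serve on the receiving edge node when it has
    headroom, otherwise fall back to the least-loaded peer (which incurs the
    node-to-node forwarding penalty the study measured)."""
    if load[request_node] < capacity[request_node]:
        return request_node
    peers = [n for n in load if n != request_node]
    return min(peers, key=lambda n: load[n] / capacity[n])

load = {"edge-a": 9, "edge-b": 3, "edge-c": 5}
capacity = {"edge-a": 10, "edge-b": 10, "edge-c": 10}
print(route("edge-a", load, capacity))   # edge-a still has headroom
load["edge-a"] = 10
print(route("edge-a", load, capacity))   # saturated, forwarded to edge-b
```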
Therefore, to preserve trust in the cloud provider and to improve the scheduling process in the cloud paradigm, we propose an efficient task scheduling algorithm that considers the priorities of tasks as well as virtual machines, thereby scheduling tasks accurately to appropriate VMs. This scheduling algorithm is modeled using firefly optimization. Workloads were modeled using fabricated datasets with different distributions as well as real worklogs from HPC2N and NASA. The algorithm was implemented in the CloudSim simulation environment, and our proposed approach was compared against the baseline approaches of ACO, PSO, and the GA. The simulation results revealed that our proposed approach significantly outperforms the baselines, minimizing the makespan while improving availability, success rate, and turnaround efficiency.}, } @article {pmid36772335, year = {2023}, author = {Markus, A and Al-Haboobi, A and Kecskemeti, G and Kertesz, A}, title = {Simulating IoT Workflows in DISSECT-CF-Fog.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {3}, pages = {}, pmid = {36772335}, issn = {1424-8220}, support = {UNKP-22-3//New National Excellence Program of the Ministry for Innovation and Technology from the source of the National Research, Development and Innovation Fund/ ; OTKA FK 131793//Hungarian Scientific Research Fund/ ; TKP2021-NVA-09//Ministry of Innovation and Technology of Hungary from the National Research, Development and Innovation Fund/ ; }, abstract = {The modelling of IoT applications utilising the resources of cloud and fog computing is not straightforward because they have to support various trigger-based events that make human life easier. Sequences of tasks, such as performing a service call, receiving a data packet sent as a message by an IoT device, managing actuators, or executing a computational task on a virtual machine, are often composed into IoT workflows. The development and deployment of such IoT workflows and their management systems in real life, including communication and network operations, can be complicated due to high operation costs and access limitations. Therefore, simulation solutions are often applied for such purposes. In this paper, we introduce a novel simulator extension of the DISSECT-CF-Fog simulator that leverages the workflow scheduling and its execution capabilities to model real-life IoT use cases. We also show that state-of-the-art simulators typically omit the IoT factor when evaluating scientific workflows. Therefore, we present a scalability study focusing on scientific workflows and on the interoperability of scientific and IoT workflows in DISSECT-CF-Fog.}, } @article {pmid36772304, year = {2023}, author = {Yu, L and He, M and Liang, H and Xiong, L and Liu, Y}, title = {A Blockchain-Based Authentication and Authorization Scheme for Distributed Mobile Cloud Computing Services.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {3}, pages = {}, pmid = {36772304}, issn = {1424-8220}, support = {2019M663475//China Postdoctoral Science Foundation/ ; 2020JDRC0100//Science and Technology Fund of Sichuan Province/ ; 2021-YF08-00151-GX//Chengdu Science and Technology Program under grant/ ; }, abstract = {Authentication and authorization constitute the essential security component, access control, for preventing unauthorized access to cloud services in mobile cloud computing (MCC) environments.
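As a rough illustration of the firefly entry above (pmid 36772424): a generic firefly optimizer for task-to-VM assignment that minimizes makespan only, without the paper's task and VM priorities or trust terms. The random-key decoding, parameters, and workload below are all illustrative.

```python
import numpy as np

rng = np.random.default_rng(1)
n_tasks, n_vms = 20, 4
task_len = rng.uniform(5, 50, n_tasks)        # synthetic task lengths
vm_speed = np.array([1.0, 1.5, 2.0, 2.5])     # VM processing speeds

def makespan(pos):
    """Decode a continuous firefly position into a task->VM map and return
    the makespan (finish time of the busiest VM)."""
    assign = np.floor(pos).astype(int) % n_vms
    finish = np.zeros(n_vms)
    for t, v in enumerate(assign):
        finish[v] += task_len[t] / vm_speed[v]
    return finish.max()

# Standard firefly moves: each firefly drifts toward every brighter one,
# where brighter means a smaller makespan.
n_fireflies, iters, beta0, gamma, alpha = 15, 100, 1.0, 0.1, 0.3
pos = rng.uniform(0, n_vms, (n_fireflies, n_tasks))
for _ in range(iters):
    bright = -np.array([makespan(p) for p in pos])
    for i in range(n_fireflies):
        for j in range(n_fireflies):
            if bright[j] > bright[i]:
                r2 = np.sum((pos[i] - pos[j]) ** 2)
                pos[i] += (beta0 * np.exp(-gamma * r2) * (pos[j] - pos[i])
                           + alpha * (rng.random(n_tasks) - 0.5))
best = min(pos, key=makespan)
print(round(makespan(best), 2))               # best makespan found
```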
Traditional centralized access control models relying on third party trust face a critical challenge due to a high trust cost and single point of failure. Blockchain can achieve the distributed trust for access control designs in a mutual untrustworthy scenario, but it also leads to expensive storage overhead. Considering the above issues, this work constructed an authentication and authorization scheme based on blockchain that can provide a dynamic update of access permissions by utilizing the smart contract. Compared with the conventional authentication scheme, the proposed scheme integrates an extra authorization function without additional computation and communication costs in the authentication phase. To improve the storage efficiency and system scalability, only one transaction is required to be stored in blockchain to record a user's access privileges on different service providers (SPs). In addition, mobile users in the proposed scheme are able to register with an arbitrary SP once and then utilize the same credential to access different SPs with different access levels. The security analysis indicates that the proposed scheme is secure under the random oracle model. The performance analysis clearly shows that the proposed scheme possesses superior computation and communication efficiencies and requires a low blockchain storage capacity for accomplishing user registration and updates.}, } @article {pmid36772101, year = {2023}, author = {Yang, J and Zheng, J and Wang, H and Li, J and Sun, H and Han, W and Jiang, N and Tan, YA}, title = {Edge-Cloud Collaborative Defense against Backdoor Attacks in Federated Learning.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {3}, pages = {}, pmid = {36772101}, issn = {1424-8220}, support = {62072037//National Natural Science Foundation of China/ ; }, abstract = {Federated learning has a distributed collaborative training mode, widely used in IoT scenarios of edge computing intelligent services. However, federated learning is vulnerable to malicious attacks, mainly backdoor attacks. Once an edge node implements a backdoor attack, the embedded backdoor mode will rapidly expand to all relevant edge nodes, which poses a considerable challenge to security-sensitive edge computing intelligent services. In the traditional edge collaborative backdoor defense method, only the cloud server is trusted by default. However, edge computing intelligent services have limited bandwidth and unstable network connections, which make it impossible for edge devices to retrain their models or update the global model. Therefore, it is crucial to detect whether the data of edge nodes are polluted in time. This paper proposes a layered defense framework for edge-computing intelligent services. At the edge, we combine the gradient rising strategy and attention self-distillation mechanism to maximize the correlation between edge device data and edge object categories and train a clean model as much as possible. On the server side, we first implement a two-layer backdoor detection mechanism to eliminate backdoor updates and use the attention self-distillation mechanism to restore the model performance. Our results show that the two-stage defense mode is more suitable for the security protection of edge computing intelligent services. It can not only weaken the effectiveness of the backdoor at the edge end but also conduct this defense at the server end, making the model more secure. 
The precision of our model on the main task is almost the same as that of the clean model.}, } @article {pmid36770943, year = {2023}, author = {Kumar, A and Arantes, PR and Saha, A and Palermo, G and Wong, BM}, title = {GPU-Enhanced DFTB Metadynamics for Efficiently Predicting Free Energies of Biochemical Systems.}, journal = {Molecules (Basel, Switzerland)}, volume = {28}, number = {3}, pages = {}, pmid = {36770943}, issn = {1420-3049}, support = {R01 GM141329/GM/NIGMS NIH HHS/United States ; CHE-2144823//National Science Foundation/ ; CHE-2028365//National Science Foundation/ ; R01GM141329/NH/NIH HHS/United States ; }, abstract = {Metadynamics calculations of large chemical systems with ab initio methods are computationally prohibitive due to the extensive sampling required to simulate the large degrees of freedom in these systems. To address this computational bottleneck, we utilized a GPU-enhanced density functional tight binding (DFTB) approach on a massively parallelized cloud computing platform to efficiently calculate the thermodynamics and metadynamics of biochemical systems. To first validate our approach, we calculated the free-energy surfaces of alanine dipeptide and showed that our GPU-enhanced DFTB calculations qualitatively agree with computationally-intensive hybrid DFT benchmarks, whereas classical force fields give significant errors. Most importantly, we show that our GPU-accelerated DFTB calculations are significantly faster than previous approaches by up to two orders of magnitude. To further extend our GPU-enhanced DFTB approach, we also carried out a 10 ns metadynamics simulation of remdesivir, which is prohibitively out of reach for routine DFT-based metadynamics calculations. We find that the free-energy surfaces of remdesivir obtained from DFTB and classical force fields differ significantly, where the latter overestimates the internal energy contribution of high free-energy states. Taken together, our benchmark tests, analyses, and extensions to large biochemical systems highlight the use of GPU-enhanced DFTB simulations for efficiently predicting the free-energy surfaces/thermodynamics of large biochemical systems.}, } @article {pmid36768346, year = {2023}, author = {Sarkar, C and Das, B and Rawat, VS and Wahlang, JB and Nongpiur, A and Tiewsoh, I and Lyngdoh, NM and Das, D and Bidarolli, M and Sony, HT}, title = {Artificial Intelligence and Machine Learning Technology Driven Modern Drug Discovery and Development.}, journal = {International journal of molecular sciences}, volume = {24}, number = {3}, pages = {}, pmid = {36768346}, issn = {1422-0067}, mesh = {Humans ; *Artificial Intelligence ; *Machine Learning ; Neural Networks, Computer ; Drug Discovery/methods ; Technology ; Drug Design ; }, abstract = {The discovery and advances of medicines may be considered as the ultimate relevant translational science effort that adds to human invulnerability and happiness. But advancing a fresh medication is a quite convoluted, costly, and protracted operation, normally costing USD ~2.6 billion and consuming a mean time span of 12 years. Methods to cut back expenditure and hasten new drug discovery have prompted an arduous and compelling brainstorming exercise in the pharmaceutical industry. The engagement of Artificial Intelligence (AI), including the deep-learning (DL) component in particular, has been facilitated by the employment of classified big data, in concert with strikingly reinforced computing prowess and cloud storage, across all fields. 
AI has energized computer-facilitated drug discovery. An unrestricted espousing of machine learning (ML), especially DL, in many scientific specialties, and the technological refinements in computing hardware and software, in concert with various aspects of the problem, sustain this progress. ML algorithms have been extensively engaged for computer-facilitated drug discovery. DL methods, such as artificial neural networks (ANNs) comprising multiple buried processing layers, have of late seen a resurgence due to their capability to power automatic attribute elicitations from the input data, coupled with their ability to obtain nonlinear input-output pertinencies. Such features of DL methods augment classical ML techniques which bank on human-contrived molecular descriptors. A major part of the early reluctance concerning utility of AI in pharmaceutical discovery has begun to melt, thereby advancing medicinal chemistry. AI, along with modern experimental technical knowledge, is anticipated to invigorate the quest for new and improved pharmaceuticals in an expeditious, economical, and increasingly compelling manner. DL-facilitated methods have just initiated kickstarting for some integral issues in drug discovery. Many technological advances, such as "message-passing paradigms", "spatial-symmetry-preserving networks", "hybrid de novo design", and other ingenious ML exemplars, will definitely come to be pervasively widespread and help dissect many of the biggest, and most intriguing inquiries. Open data allocation and model augmentation will exert a decisive hold during the progress of drug discovery employing AI. This review will address the impending utilizations of AI to refine and bolster the drug discovery operation.}, } @article {pmid36763944, year = {2023}, author = {Shahinyan, GK and Hu, MY and Jiang, T and Osadchiy, V and Sigalos, JT and Mills, JN and Kachroo, N and Eleswarapu, SV}, title = {Cannabis and male sexual health: contemporary qualitative review and insight into perspectives of young men on the internet.}, journal = {Sexual medicine reviews}, volume = {11}, number = {2}, pages = {139-150}, doi = {10.1093/sxmrev/qeac010}, pmid = {36763944}, issn = {2050-0521}, mesh = {Humans ; Male ; United States ; *Sexual Health ; *Cannabis/adverse effects ; Quality of Life ; Men's Health ; Internet ; }, abstract = {INTRODUCTION: Cannabis use is increasing across the United States, yet its short- and long-term effects on sexual function remain controversial. Currently, there is a paucity of studies exploring the relationship between cannabis and men's health.

OBJECTIVES: To summarize the available literature on cannabis and men's health and provide insight into lay perceptions of this topic.

METHODS: We performed a qualitative PubMed review of the existing literature on cannabis and men's health according to the PRISMA guidelines. Separately, we analyzed relevant themes in online men's health forums. We utilized a Google cloud-based platform (BigQuery) to extract relevant posts from 5 men's health Reddit forums from August 2018 to August 2019. We conducted a qualitative thematic analysis of the posts and quantitatively analyzed them using natural language processing and a meaning extraction method with principal component analysis.

RESULTS: Our literature review revealed a mix of animal and human studies demonstrating the negative effects of cannabis on semen parameters and varying effects on erectile function and hormone levels. In our analysis of 372 686 Reddit posts, 1190 (0.3%) included relevant discussion on cannabis and men's health. An overall 272 posts were manually analyzed, showing that online discussions revolve around seeking answers and sharing the effects of cannabis on various aspects of sexual health and quality of life, often with conflicting experiences. Quantitative analysis revealed 1 thematic cluster related to cannabis, insecurity, and mental/physical health.

CONCLUSIONS: There is a limited number of quality human studies investigating the effects of cannabis on men's health. Men online are uncertain about how cannabis affects their sexual health and seek more information. As the prevalence of cannabis use increases, so does the need for research in this area.}, } @article {pmid36757918, year = {2023}, author = {Pollak, DJ and Chawla, G and Andreev, A and Prober, DA}, title = {First steps into the cloud: Using Amazon data storage and computing with Python notebooks.}, journal = {PloS one}, volume = {18}, number = {2}, pages = {e0278316}, pmid = {36757918}, issn = {1932-6203}, support = {R35 NS122172/NS/NINDS NIH HHS/United States ; UF1 NS126562/NS/NINDS NIH HHS/United States ; T32 NS105595/NS/NINDS NIH HHS/United States ; }, mesh = {Animals ; *Zebrafish ; *Software ; Programming Languages ; Information Storage and Retrieval ; Cloud Computing ; }, abstract = {With the oncoming age of big data, biologists are encountering more use cases for cloud-based computing to streamline data processing and storage. Unfortunately, cloud platforms are difficult to learn, and there are few resources for biologists to demystify them. We have developed a guide for experimental biologists to set up cloud processing on Amazon Web Services to cheaply outsource data processing and storage. Here we provide a guide for setting up a computing environment in the cloud and showcase examples of using Python and Julia programming languages. We present example calcium imaging data in the zebrafish brain and corresponding analysis using suite2p software. Tools for budget and user management are further discussed in the attached protocol. Using this guide, researchers with limited coding experience can get started with cloud-based computing or move existing coding infrastructure into the cloud environment.}, } @article {pmid36754821, year = {2023}, author = {Bosia, F and Zheng, P and Vaucher, A and Weymuth, T and Dral, PO and Reiher, M}, title = {Ultra-fast semi-empirical quantum chemistry for high-throughput computational campaigns with Sparrow.}, journal = {The Journal of chemical physics}, volume = {158}, number = {5}, pages = {054118}, doi = {10.1063/5.0136404}, pmid = {36754821}, issn = {1089-7690}, abstract = {Semi-empirical quantum chemical approaches are known to compromise accuracy for the feasibility of calculations on huge molecules. However, the need for ultrafast calculations in interactive quantum mechanical studies, high-throughput virtual screening, and data-driven machine learning has shifted the emphasis toward calculation runtimes recently. This comes with new constraints for the software implementation as many fast calculations would suffer from a large overhead of the manual setup and other procedures that are comparatively fast when studying a single molecular structure, but which become prohibitively slow for high-throughput demands. In this work, we discuss the effect of various well-established semi-empirical approximations on calculation speed and relate this to data transfer rates from the raw-data source computer to the results of the visualization front end. For the former, we consider desktop computers, local high performance computing, and remote cloud services in order to elucidate the effect on interactive calculations, for web and cloud interfaces in local applications, and in world-wide interactive virtual sessions. 
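The Amazon storage workflow described in the cloud-guide entry above (pmid 36757918) boils down to a handful of boto3 calls. A minimal sketch; the bucket and file names are hypothetical, and credentials are assumed to come from the standard AWS configuration.

```python
import boto3  # AWS SDK for Python; credentials come from the usual AWS config

s3 = boto3.client("s3")
bucket = "my-lab-imaging-data"   # hypothetical bucket name

# Upload a local recording, list what is stored, then pull a copy back down.
s3.upload_file("fish01_calcium.tif", bucket, "raw/fish01_calcium.tif")
for obj in s3.list_objects_v2(Bucket=bucket, Prefix="raw/")["Contents"]:
    print(obj["Key"], obj["Size"])
s3.download_file(bucket, "raw/fish01_calcium.tif", "local_copy.tif")
```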
The models discussed in this work have been implemented into our open-source software SCINE Sparrow.}, } @article {pmid36753980, year = {2023}, author = {Cubillos, LH and Augenstein, TE and Ranganathan, R and Krishnan, C}, title = {Breaking the barriers to designing online experiments: A novel open-source platform for supporting procedural skill learning experiments.}, journal = {Computers in biology and medicine}, volume = {154}, number = {}, pages = {106627}, doi = {10.1016/j.compbiomed.2023.106627}, pmid = {36753980}, issn = {1879-0534}, mesh = {Humans ; *Psychomotor Performance ; *Motor Skills ; Learning ; Hand ; }, abstract = {BACKGROUND: Motor learning experiments are typically performed in laboratory environments, which can be time-consuming and require dedicated equipment/personnel, thus limiting the ability to gather data from large samples. To address this problem, some researchers have transitioned to unsupervised online experiments, showing advantages in participant recruitment without losing validity. However, most online platforms require coding experience or time-consuming setups to create and run experiments, limiting their usage across the field.

METHOD: To tackle this issue, an open-source web-based platform was developed (https://experiments.neurro-lab.engin.umich.edu/) to create, run, and manage procedural skill learning experiments without coding or setup requirements. The feasibility of the platform and the comparability of the results between supervised (n = 17) and unsupervised (n = 24) groups were tested in 41 naive right-handed participants using an established sequential finger tapping task. The study also tested whether a previously reported rapid form of offline consolidation (i.e., microscale learning) in procedural skill learning could be replicated with the developed platform and evaluated the extent of interlimb transfer associated with the finger tapping task.

RESULTS: The results indicated that the performance metrics were comparable between the supervised and unsupervised groups (all p's > 0.05). The learning curves, mean tapping speeds, and microscale learning were similar to those reported in previous studies. Training led to significant improvements in mean tapping speed (2.22 ± 1.48 keypresses/s, p < 0.001) and a significant interlimb transfer of learning (1.22 ± 1.43 keypresses/s, p < 0.05).

CONCLUSIONS: The results show that the presented platform may serve as a valuable tool for conducting online procedural skill-learning experiments.}, } @article {pmid36750410, year = {2023}, author = {Raucci, U and Weir, H and Sakshuwong, S and Seritan, S and Hicks, CB and Vannucci, F and Rea, F and Martínez, TJ}, title = {Interactive Quantum Chemistry Enabled by Machine Learning, Graphical Processing Units, and Cloud Computing.}, journal = {Annual review of physical chemistry}, volume = {74}, number = {}, pages = {313-336}, doi = {10.1146/annurev-physchem-061020-053438}, pmid = {36750410}, issn = {1545-1593}, abstract = {Modern quantum chemistry algorithms are increasingly able to accurately predict molecular properties that are useful for chemists in research and education. Despite this progress, performing such calculations is currently unattainable for the wider chemistry community, as they often require domain expertise, computer programming skills, and powerful computer hardware. In this review, we outline methods to eliminate these barriers using cutting-edge technologies. We discuss the ingredients needed to create accessible platforms that can compute quantum chemistry properties in real time, including graphics processing unit (GPU)-accelerated quantum chemistry in the cloud, artificial intelligence-driven natural molecule input methods, and extended reality visualization. We end by highlighting a series of exciting applications that assemble these components to create uniquely interactive platforms for computing and visualizing spectra, 3D structures, molecular orbitals, and many other chemical properties.}, } @article {pmid36747613, year = {2024}, author = {Koenig, Z and Yohannes, MT and Nkambule, LL and Zhao, X and Goodrich, JK and Kim, HA and Wilson, MW and Tiao, G and Hao, SP and Sahakian, N and Chao, KR and Walker, MA and Lyu, Y and Rehm, HL and Neale, BM and Talkowski, ME and Daly, MJ and Brand, H and Karczewski, KJ and Atkinson, EG and Martin, AR}, title = {A harmonized public resource of deeply sequenced diverse human genomes.}, journal = {bioRxiv : the preprint server for biology}, volume = {}, number = {}, pages = {}, pmid = {36747613}, issn = {2692-8205}, support = {P30 DK043351/DK/NIDDK NIH HHS/United States ; R00 MH117229/MH/NIMH NIH HHS/United States ; R01 DE031261/DE/NIDCR NIH HHS/United States ; R01 MH115957/MH/NIMH NIH HHS/United States ; }, abstract = {Underrepresented populations are often excluded from genomic studies due in part to a lack of resources supporting their analyses. The 1000 Genomes Project (1kGP) and Human Genome Diversity Project (HGDP), which have recently been sequenced to high coverage, are valuable genomic resources because of the global diversity they capture and their open data sharing policies. Here, we harmonized a high-quality set of 4,094 whole genomes from HGDP and 1kGP with data from the Genome Aggregation Database (gnomAD) and identified over 153 million high-quality SNVs, indels, and SVs. We performed a detailed ancestry analysis of this cohort, characterizing population structure and patterns of admixture across populations, analyzing site frequency spectra, and measuring variant counts at global and subcontinental levels. We also demonstrate substantial added value from this dataset compared to the prior versions of the component resources, typically combined via liftover and variant intersection; for example, we catalog millions of new genetic variants, mostly rare, compared to previous releases.
In addition to unrestricted individual-level public release, we provide detailed tutorials for conducting many of the most common quality control steps and analyses with these data in a scalable cloud-computing environment and publicly release this new phased joint callset for use as a haplotype resource in phasing and imputation pipelines. This jointly called reference panel will serve as a key resource to support research of diverse ancestry populations.}, } @article {pmid36733938, year = {2023}, author = {Healthcare Engineering, JO}, title = {Retracted: Discussion on Health Service System of Mobile Medical Institutions Based on Internet of Things and Cloud Computing.}, journal = {Journal of healthcare engineering}, volume = {2023}, number = {}, pages = {9892481}, pmid = {36733938}, issn = {2040-2309}, abstract = {[This retracts the article DOI: 10.1155/2022/5235349.].}, } @article {pmid36723167, year = {2023}, author = {Klukowski, P and Riek, R and Güntert, P}, title = {NMRtist: an online platform for automated biomolecular NMR spectra analysis.}, journal = {Bioinformatics (Oxford, England)}, volume = {39}, number = {2}, pages = {}, pmid = {36723167}, issn = {1367-4811}, support = {891690//European Union/ ; }, mesh = {Humans ; Nuclear Magnetic Resonance, Biomolecular ; *Software ; *Proteins/chemistry ; Magnetic Resonance Spectroscopy ; Magnetic Resonance Imaging ; }, abstract = {SUMMARY: We present NMRtist, an online platform that combines deep learning, large-scale optimization and cloud computing to automate protein NMR spectra analysis. Our website provides virtual storage for NMR spectra deposition together with a set of applications designed for automated peak picking, chemical shift assignment and protein structure determination. The system can be used by non-experts and allows protein assignments and structures to be determined within hours after the measurements, strictly without any human intervention.

AVAILABILITY AND IMPLEMENTATION: NMRtist is freely available to non-commercial users at https://nmrtist.org.

SUPPLEMENTARY INFORMATION: Supplementary data are available at Bioinformatics online.}, } @article {pmid36721327, year = {2023}, author = {Batorsky, A and Bowden, AE and Darwin, J and Fields, AJ and Greco, CM and Harris, RE and Hue, TF and Kakyomya, J and Mehling, W and O'Neill, C and Patterson, CG and Piva, SR and Sollmann, N and Toups, V and Wasan, AD and Wasserman, R and Williams, DA and Vo, NV and Psioda, MA and McCumber, M}, title = {The Back Pain Consortium (BACPAC) Research Program Data Harmonization: Rationale for Data Elements and Standards.}, journal = {Pain medicine (Malden, Mass.)}, volume = {24}, number = {Suppl 1}, pages = {S95-S104}, pmid = {36721327}, issn = {1526-4637}, support = {UH2 AR076729/AR/NIAMS NIH HHS/United States ; UH3 AR076723/AR/NIAMS NIH HHS/United States ; U19 AR076737/AR/NIAMS NIH HHS/United States ; 1UH2AR076731-01/NH/NIH HHS/United States ; U19 AR076734/AR/NIAMS NIH HHS/United States ; UH2 AR076719/AR/NIAMS NIH HHS/United States ; UH3 AR076719/AR/NIAMS NIH HHS/United States ; U19 AR076725/AR/NIAMS NIH HHS/United States ; }, mesh = {Humans ; *Low Back Pain/therapy ; Outcome Assessment, Health Care ; Research Design ; }, abstract = {OBJECTIVE: One aim of the Back Pain Consortium (BACPAC) Research Program is to develop an integrated model of chronic low back pain that is informed by combined data from translational research and clinical trials. We describe efforts to maximize data harmonization and accessibility to facilitate Consortium-wide analyses.

METHODS: Consortium-wide working groups established harmonized data elements to be collected in all studies and developed standards for tabular and nontabular data (eg, imaging and omics). The BACPAC Data Portal was developed to facilitate research collaboration across the Consortium.

RESULTS: Clinical experts developed the BACPAC Minimum Dataset with required domains and outcome measures to be collected by use of questionnaires across projects. Other nonrequired domain-specific measures are collected by multiple studies. To optimize cross-study analyses, a modified data standard was developed on the basis of the Clinical Data Interchange Standards Consortium Study Data Tabulation Model to harmonize data structures and facilitate integration of baseline characteristics, participant-reported outcomes, chronic low back pain treatments, clinical exam, functional performance, psychosocial characteristics, quantitative sensory testing, imaging, and biomechanical data. Standards to accommodate the unique features of chronic low back pain data were adopted. Research units submit standardized study data to the BACPAC Data Portal, developed as a secure cloud-based central data repository and computing infrastructure for researchers to access and conduct analyses on data collected by or acquired for BACPAC.

CONCLUSIONS: BACPAC harmonization efforts and data standards serve as an innovative model for data integration that could be used as a framework for other consortia with multiple, decentralized research programs.}, } @article {pmid36720454, year = {2023}, author = {Firoz, A and Ravanan, P and Saha, P and Prashar, T and Talwar, P}, title = {Genome-wide screening and identification of potential kinases involved in endoplasmic reticulum stress responses.}, journal = {Life sciences}, volume = {317}, number = {}, pages = {121452}, doi = {10.1016/j.lfs.2023.121452}, pmid = {36720454}, issn = {1879-0631}, mesh = {Animals ; Humans ; Mice ; Rats ; Base Sequence ; *DNA-Binding Proteins/genetics ; *Endoplasmic Reticulum/metabolism ; Endoplasmic Reticulum Stress ; HeLa Cells ; Mammals/metabolism ; Transcription Factors/metabolism ; Phosphotransferases ; }, abstract = {AIM: This study aims to identify endoplasmic reticulum stress response elements (ERSE) in the human genome to explore potentially regulated genes, including kinases and transcription factors, involved in endoplasmic reticulum (ER) stress and its related diseases.

MATERIALS AND METHODS: Python-based whole genome screening of ERSE was performed using the Amazon Web Services elastic computing system. The Kinome database was used to filter out the kinases from the extracted list of ERSE-related genes. Additionally, network analysis and genome enrichment were achieved using NDEx, the Network Data Exchange software, and web-based computational tools. To validate gene expression, quantitative RT-PCR was performed for selected kinases from the list by exposing HeLa cells to the ER stress inducers tunicamycin and brefeldin for various time points.

KEY FINDINGS: The overall number of ERSE-associated genes follows a similar pattern in humans, mice, and rats, demonstrating the ERSE's conservation in mammals. A total of 2705 ERSE sequences were discovered in the human genome (GRCh38.p14), from which we identified 36 kinase-encoding genes. Gene expression analysis has shown a significant change in the expression of selected genes under ER stress conditions in HeLa cells, supporting our findings.

SIGNIFICANCE: In this study, we have introduced a rapid method using Amazon cloud-based services for genome-wide screening of ERSE sequences from both positive and negative strands, which covers the entire genome reference sequence. Approximately 10% of the human protein-protein interactome was found to be associated with ERSE-related genes. Our study also provides a rich resource of human ER stress-response-based protein networks and transcription factor interactions and a reference point for future research aiming at targeted therapeutics.}, } @article {pmid36717471, year = {2023}, author = {Nandasena, WDKV and Brabyn, L and Serrao-Neumann, S}, title = {Monitoring invasive pines using remote sensing: a case study from Sri Lanka.}, journal = {Environmental monitoring and assessment}, volume = {195}, number = {2}, pages = {347}, pmid = {36717471}, issn = {1573-2959}, mesh = {*Remote Sensing Technology/methods ; Sri Lanka ; Conservation of Natural Resources/methods ; Environmental Monitoring/methods ; Ecosystem ; *Pinus ; }, abstract = {Production plantation forestry has many economic benefits but can also have negative environmental impacts such as the spreading of invasive pines to native forest habitats. Monitoring forests for the presence of invasive pines helps with the management of this issue. However, detection of vegetation change over a large time period is difficult due to changes in image quality and sensor types, and to the spectral similarity of evergreen species and frequent cloud cover in the study area. The costs of high-resolution images are also prohibitive for routine monitoring in resource-constrained countries. This research investigated the use of remote sensing to identify the spread of Pinus caribaea over a 21-year period (2000 to 2021) in Belihuloya, Sri Lanka, using Landsat images. It applied a range of techniques to produce cloud-free images, extract vegetation features, and improve vegetation classification accuracy, followed by the use of a Geographical Information System to spatially analyze the spread of invasive pines. The results showed most invading pines were found within 100 m of the pine plantations' borders where broadleaved forests and grasslands are vulnerable to invasion. However, the extent of invasive pine had an overall decline of 4 ha over the 21 years. The study confirmed that remote sensing combined with spatial analysis is an effective tool for monitoring invasive pines in countries with limited resources. This study also provides information to conservationists and forest managers to conduct strategic planning for sustainable forest management and conservation in Sri Lanka.}, } @article {pmid36714386, year = {2023}, author = {Patel, YS and Bedi, J}, title = {MAG-D: A multivariate attention network based approach for cloud workload forecasting.}, journal = {Future generations computer systems : FGCS}, volume = {142}, number = {}, pages = {376-392}, pmid = {36714386}, issn = {0167-739X}, abstract = {The coronavirus pandemic and the shift to work-from-home have drastically changed working styles and forced us to rapidly shift towards cloud-based platforms and services for seamless functioning. The pandemic has accelerated a permanent shift in cloud migration. It is estimated that over 95% of digital workloads will reside in cloud-native platforms. Real-time workload forecasting and efficient resource management are two critical challenges for cloud service providers.
Cloud workloads are highly volatile and chaotic due to their time-varying nature; thus, classical machine learning-based prediction models have failed to deliver accurate forecasts. Recent advances in deep learning have gained massive popularity in forecasting highly nonlinear cloud workloads; however, they have yet to achieve excellent forecasting outcomes. Consequently, there is a demand for more accurate forecasting algorithms. Therefore, in this work, we propose 'MAG-D', a Multivariate Attention and Gated recurrent unit based Deep learning approach for Cloud workload forecasting in data centers. We performed an extensive set of experiments on the Google cluster traces, and we confirm that MAG-D exploits the long-range nonlinear dependencies of cloud workload and improves the prediction accuracy on average compared to the recent techniques applying hybrid methods using Long Short Term Memory Network (LSTM), Convolutional Neural Network (CNN), Gated Recurrent Units (GRU), and Bidirectional Long Short Term Memory Network (BiLSTM).}, } @article {pmid36712619, year = {2023}, author = {He, R and Xie, W and Wu, B and Brandon, NP and Liu, X and Li, X and Yang, S}, title = {Towards interactional management for power batteries of electric vehicles.}, journal = {RSC advances}, volume = {13}, number = {3}, pages = {2036-2056}, pmid = {36712619}, issn = {2046-2069}, abstract = {With the ever-growing digitalization and mobility of electric transportation, lithium-ion batteries are facing performance and safety issues with the appearance of new materials and the advance of manufacturing techniques. This paper presents a systematic review of burgeoning multi-scale modelling and design for battery efficiency and safety management. The rise of cloud computing provides a tactical solution for efficiently achieving the interactional management and control of power batteries based on the battery system and traffic big data. The potential of selecting adaptive strategies in emerging digital management is covered systematically from principles and modelling to machine learning. Specifically, multi-scale optimization is expounded in terms of materials, structures, manufacturing and grouping. The progress on modelling, state estimation and management methods is summarized and discussed in detail. Moreover, this review demonstrates the innovative progress of machine learning-based data analysis in battery research so far, laying the foundation for future cloud and digital battery management to develop reliable onboard applications.}, } @article {pmid36711159, year = {2023}, author = {D'Souza, G and Reddy, NVS and Manjunath, KN}, title = {Localization of lung abnormalities on chest X-rays using self-supervised equivariant attention.}, journal = {Biomedical engineering letters}, volume = {13}, number = {1}, pages = {21-30}, pmid = {36711159}, issn = {2093-985X}, abstract = {UNLABELLED: Chest X-Ray (CXR) images provide most anatomical details and the abnormalities on a 2D plane. Therefore, a 2D view of the 3D anatomy is sometimes sufficient for the initial diagnosis. However, close to fourteen commonly occurring diseases are sometimes difficult to identify by visually inspecting the images. Therefore, there is a drift toward developing computer-aided assistive systems to help radiologists. This paper proposes a deep learning model for the classification and localization of chest diseases by using image-level annotations.
The model consists of a modified ResNet50 backbone for extracting a feature corpus from the images, a classifier, and a pixel correlation module (PCM). During PCM training, the network is a weight-shared siamese architecture where the first branch applies the affine transform to the image before feeding it to the network, while the second applies the same transform to the network output. The method was evaluated on CXRs from the clinical center, split in the ratio of 70:20 for training and testing. The model was developed and tested using the cloud computing platform Google Colaboratory (NVIDIA Tesla P100 GPU, 16 GB of RAM). A radiologist subjectively validated the results. Our model, trained with the configurations mentioned in this paper, outperformed benchmark results.

SUPPLEMENTARY INFORMATION: The online version contains supplementary material available at 10.1007/s13534-022-00249-5.}, } @article {pmid36704354, year = {2022}, author = {Alvarellos, M and Sheppard, HE and Knarston, I and Davison, C and Raine, N and Seeger, T and Prieto Barja, P and Chatzou Dunford, M}, title = {Democratizing clinical-genomic data: How federated platforms can promote benefits sharing in genomics.}, journal = {Frontiers in genetics}, volume = {13}, number = {}, pages = {1045450}, pmid = {36704354}, issn = {1664-8021}, abstract = {Since the first sequencing of the human genome, associated sequencing costs have dramatically lowered, leading to an explosion of genomic data. This valuable data should in theory be of huge benefit to the global community, although unfortunately the benefits of these advances have not been widely distributed. Much of today's clinical-genomic data is siloed and inaccessible in adherence with strict governance and privacy policies, with more than 97% of hospital data going unused, according to one reference. Despite these challenges, there are promising efforts to make clinical-genomic data accessible and useful without compromising security. Specifically, federated data platforms are emerging as key resources to facilitate secure data sharing without having to physically move the data from outside of its organizational or jurisdictional boundaries. In this perspective, we summarize the overarching progress in establishing federated data platforms, and highlight critical considerations on how they should be managed to ensure patient and public trust. These platforms are enabling global collaboration and improving representation of underrepresented groups, since sequencing efforts have not prioritized diverse population representation until recently. Federated data platforms, when combined with advances in no-code technology, can be accessible to the diverse end-users that make up the genomics workforce, and we discuss potential strategies to develop sustainable business models so that the platforms can continue to enable research long term. Although these platforms must be carefully managed to ensure appropriate and ethical use, they are democratizing access and insights to clinical-genomic data that will progress research and enable impactful therapeutic findings.}, } @article {pmid36702751, year = {2023}, author = {Bang, I and Lee, SM and Park, S and Park, JY and Nong, LK and Gao, Y and Palsson, BO and Kim, D}, title = {Deep-learning optimized DEOCSU suite provides an iterable pipeline for accurate ChIP-exo peak calling.}, journal = {Briefings in bioinformatics}, volume = {24}, number = {2}, pages = {}, doi = {10.1093/bib/bbad024}, pmid = {36702751}, issn = {1477-4054}, mesh = {*Chromatin Immunoprecipitation Sequencing ; *Deep Learning ; Chromatin Immunoprecipitation ; DNA-Binding Proteins/metabolism ; Software ; Algorithms ; Binding Sites ; Sequence Analysis, DNA ; }, abstract = {Recognizing binding sites of DNA-binding proteins is a key factor for elucidating transcriptional regulation in organisms. ChIP-exo enables researchers to delineate genome-wide binding landscapes of DNA-binding proteins with near single base-pair resolution. However, the peak calling step hinders ChIP-exo application since the published algorithms tend to generate false-positive and false-negative predictions. Here, we report the development of DEOCSU (DEep-learning Optimized ChIP-exo peak calling SUite), a novel machine learning-based ChIP-exo peak calling suite. 
DEOCSU entails a deep convolutional neural network model trained on curated ChIP-exo peak data to distinguish the visualized data of bona fide peaks from false ones. Performance validation of the trained deep-learning model indicated high accuracy, precision, and recall, each over 95%. Applying the new suite to both in-house and publicly available ChIP-exo datasets obtained from bacteria, eukaryotes and archaea revealed an accurate prediction of peaks containing canonical motifs, highlighting the versatility and efficiency of DEOCSU. Furthermore, DEOCSU can be executed on a cloud computing platform or in a local environment. With visualization software included in the suite, adjustable options such as the threshold of peak probability, and iterable updating of the pre-trained model, DEOCSU can be optimized for users' specific needs.}, } @article {pmid36696392, year = {2023}, author = {Kim, J and Karyadi, DM and Hartley, SW and Zhu, B and Wang, M and Wu, D and Song, L and Armstrong, GT and Bhatia, S and Robison, LL and Yasui, Y and Carter, B and Sampson, JN and Freedman, ND and Goldstein, AM and Mirabello, L and Chanock, SJ and Morton, LM and Savage, SA and Stewart, DR}, title = {Inflated expectations: Rare-variant association analysis using public controls.}, journal = {PloS one}, volume = {18}, number = {1}, pages = {e0280951}, pmid = {36696392}, issn = {1932-6203}, support = {U24 CA055727/CA/NCI NIH HHS/United States ; }, mesh = {*Motivation ; *High-Throughput Nucleotide Sequencing/methods ; Polymorphism, Single Nucleotide ; Software ; }, abstract = {The use of publicly available sequencing datasets as controls (hereafter, "public controls") in studies of rare variant disease associations has great promise but can increase the risk of false-positive discovery. The specific factors that could contribute to inflated distribution of test statistics have not been systematically examined. Here, we leveraged both public controls (gnomAD v2.1) and several datasets sequenced in our laboratory to systematically investigate factors that could contribute to false-positive discovery, as measured by λΔ95, a measure to quantify the degree of inflation in statistical significance. Analyses of datasets in this investigation found that 1) the significantly inflated distribution of test statistics decreased substantially when the same variant caller and filtering pipelines were employed, 2) differences in library prep kits and sequencers did not affect the false-positive discovery rate and 3) joint vs. separate variant-calling of cases and controls did not contribute to the inflation of test statistics. Currently available methods do not adequately adjust for this high rate of false-positive discovery. These results, especially if replicated, emphasize the risks of using public controls for rare-variant association tests in which individual-level data and the computational pipeline are not readily accessible, which prevents the use of the same variant-calling and filtering pipelines on both cases and controls. A plausible solution exists with the emergence of cloud-based computing, which can make it possible to bring containerized analytical pipelines to the data (rather than the data to the pipeline) and could avert or minimize these issues.
We suggest that future reports account for this issue and acknowledge it as a limitation when presenting new findings from studies that cannot practically analyze all data on a single pipeline.}, } @article {pmid36695636, year = {2023}, author = {Wang, J and Zheng, J and Lee, EE and Aguilar, B and Phan, J and Abdilleh, K and Taylor, RC and Longabaugh, W and Johansson, B and Mertens, F and Mitelman, F and Pot, D and LaFramboise, T}, title = {A cloud-based resource for genome coordinate-based exploration and large-scale analysis of chromosome aberrations and gene fusions in cancer.}, journal = {Genes, chromosomes & cancer}, volume = {62}, number = {8}, pages = {441-448}, pmid = {36695636}, issn = {1098-2264}, support = {R21 CA249138/CA/NCI NIH HHS/United States ; R21CA249138/NH/NIH HHS/United States ; R01CA217992/NH/NIH HHS/United States ; HHSN261201500003C/CA/NCI NIH HHS/United States ; HHSN261201400008C/CA/NCI NIH HHS/United States ; R01 CA217992/CA/NCI NIH HHS/United States ; R01LM013067/NH/NIH HHS/United States ; R01 LM013067/LM/NLM NIH HHS/United States ; /HH/HHS/United States ; HHSN261201500003I/CA/NCI NIH HHS/United States ; }, mesh = {Humans ; *Cloud Computing ; Chromosome Aberrations ; Karyotyping ; *Neoplasms/genetics ; Gene Fusion ; }, abstract = {Cytogenetic analysis provides important information on the genetic mechanisms of cancer. The Mitelman Database of Chromosome Aberrations and Gene Fusions in Cancer (Mitelman DB) is the largest catalog of acquired chromosome aberrations, presently comprising >70 000 cases across multiple cancer types. Although this resource has enabled the identification of chromosome abnormalities leading to specific cancers and cancer mechanisms, a large-scale, systematic analysis of these aberrations and their downstream implications has been difficult due to the lack of a standard, automated mapping from aberrations to genomic coordinates. We previously introduced CytoConverter as a tool that automates such conversions. CytoConverter has now been updated with improved interpretation of karyotypes and has been integrated with the Mitelman DB, providing a comprehensive mapping of the 70 000+ cases to genomic coordinates, as well as visualization of the frequencies of chromosomal gains and losses. Importantly, all CytoConverter-generated genomic coordinates are publicly available in Google BigQuery, a cloud-based data warehouse, facilitating data exploration and integration with other datasets hosted by the Institute for Systems Biology Cancer Gateway in the Cloud (ISB-CGC) Resource. We demonstrate the use of BigQuery for integrative analysis of Mitelman DB with other cancer datasets, including a comparison of the frequency of imbalances identified in Mitelman DB cases with those found in The Cancer Genome Atlas (TCGA) copy number datasets.
This solution provides opportunities to leverage the power of cloud computing for low-cost, scalable, and integrated analysis of chromosome aberrations and gene fusions in cancer.}, } @article {pmid36694127, year = {2023}, author = {Digby, B and Finn, SP and Ó Broin, P}, title = {nf-core/circrna: a portable workflow for the quantification, miRNA target prediction and differential expression analysis of circular RNAs.}, journal = {BMC bioinformatics}, volume = {24}, number = {1}, pages = {27}, pmid = {36694127}, issn = {1471-2105}, support = {18/CRT/6214/SFI_/Science Foundation Ireland/Ireland ; }, mesh = {*MicroRNAs/genetics/metabolism ; RNA, Circular ; Workflow ; Software ; Sequence Analysis, RNA ; }, abstract = {BACKGROUND: Circular RNAs (circRNAs) are a class of covalently closed non-coding RNAs that have garnered increased attention from the research community due to their stability, tissue-specific expression and role as transcriptional modulators via sequestration of miRNAs. Currently, multiple quantification tools capable of detecting circRNAs exist, yet none delineate circRNA-miRNA interactions, and only one employs differential expression analysis. Efforts have been made to bridge this gap by way of circRNA workflows; however, these workflows are limited by both the types of analyses available and the computational skills required to run them.

RESULTS: We present nf-core/circrna, a multi-functional, automated high-throughput pipeline implemented in Nextflow that allows users to characterise the role of circRNAs in RNA sequencing datasets via three analysis modules: (1) circRNA quantification, robust filtering, and annotation; (2) miRNA target prediction of the mature spliced sequence; and (3) differential expression analysis. nf-core/circrna has been developed within the nf-core framework, ensuring robust portability across computing environments via containerisation, parallel deployment on cluster/cloud-based infrastructures, comprehensive documentation and maintenance support.

CONCLUSION: nf-core/circrna reduces the barrier to entry for researchers by providing an easy-to-use, platform-independent and scalable workflow for circRNA analyses. Source code, documentation and installation instructions are freely available at https://nf-co.re/circrna and https://github.com/nf-core/circrna.}, } @article {pmid36691672, year = {2023}, author = {Ørka, HO and Gailis, J and Vege, M and Gobakken, T and Hauglund, K}, title = {Analysis-ready satellite data mosaics from Landsat and Sentinel-2 imagery.}, journal = {MethodsX}, volume = {10}, number = {}, pages = {101995}, pmid = {36691672}, issn = {2215-0161}, abstract = {Today's enormous amounts of freely available high-resolution satellite imagery create a demand for effective preprocessing methods. One such preprocessing method needed in many applications utilizing optical satellite imagery from the Landsat and Sentinel-2 archives is mosaicking. Merging hundreds of single scenes into a single satellite data mosaic before conducting analysis such as land cover classification, change detection, or modelling is often a prerequisite. Maintaining the original data structure and preserving metadata for further modelling or classification would be advantageous for many applications. Furthermore, in other applications, e.g., land cover classification, creating the mosaic for a specific period that matches the phenological state of the phenomena in nature would be beneficial. In addition, supporting in-house and computing centers not directly connected to a specific cloud provider could be a requirement for some institutions or companies. In the current work, we present a method called Geomosaic that meets these criteria and produces analysis-ready satellite data mosaics from Landsat and Sentinel-2 imagery.•The method described produces analysis-ready satellite data mosaics.•The satellite data mosaics contain pixel metadata usable for further analysis.•The algorithm is available as an open-source tool coded in Python and can be used on multiple platforms.}, } @article {pmid36691530, year = {2023}, author = {Oñate, W and Sanz, R}, title = {Analysis of architectures implemented for IIoT.}, journal = {Heliyon}, volume = {9}, number = {1}, pages = {e12868}, pmid = {36691530}, issn = {2405-8440}, abstract = {Several technological blocks are being developed to provide solutions to the requirements necessary for the implementation of industrial IoT. However, this is feasible with the resources offered by the Cloud, such as processing, applications and services. Despite this, there are negative aspects such as bandwidth, Internet service variability, latency, lack of filtering of junk data transmitted to the cloud, and security. From another perspective, these situations emerge as challenges that are being studied to meet the needs of this new industrial era, which means that the important contributions of academia, companies, and consortia are achieving a change of course by taking advantage of the potential of the Cloud, but now from the vicinity or perimeter of a production plant. To achieve this task, some pillars of IoT technology are being used as a basis, such as Fog Computing Platform (FCP) and Edge Computing (EC) designs, together with the needed cooperation between information and operation technologies (IT and OT), with which it is intended to accelerate the paradigm shift that this situation has generated.
The objective of this study is to present a systematic literature review (SLR) of recent studies on hierarchical and flat peer-to-peer (P2P) architectures implemented for manufacturing IIoT, analyzing the successes and weaknesses derived from them, such as latency, security, computing methodologies, virtualization architectures, Fog Computing (FC) in Manufacturing Execution Systems (MES), Quality of Service (QoS), and connectivity, with the aim of motivating possible research points when implementing IIoT with these new technologies.}, } @article {pmid36690091, year = {2023}, author = {Li, Z and Demir, I}, title = {U-net-based semantic classification for flood extent extraction using SAR imagery and GEE platform: A case study for 2019 central US flooding.}, journal = {The Science of the total environment}, volume = {869}, number = {}, pages = {161757}, doi = {10.1016/j.scitotenv.2023.161757}, pmid = {36690091}, issn = {1879-1026}, abstract = {Data-driven models for water body extraction have experienced accelerated growth in recent years, thanks to advances in processing techniques and computational resources, as well as improved data availability. In this study, we modified the standard U-Net, a convolutional neural network (CNN) method, to extract water bodies from scenes captured from Sentinel-1 satellites of selected areas during the 2019 Central US flooding. We compared the results to several benchmark models, including the standard U-Net and ResNet50, an advanced thresholding method, Bmax Otsu, and a recently introduced flood inundation map archive. Then, we looked at how data input types, input resolution, and the use of pre-trained weights affect the model performance. We adopted a three-category classification framework to test whether and how permanent water and flood pixels behave differently. Most of the data in this study were gathered and pre-processed utilizing the open-access Google Earth Engine (GEE) cloud platform. According to the results, the adjusted U-Net outperformed all other benchmark models and datasets. Adding a slope layer enhances model performance with the 30 m input data compared to training the model on only VV and VH bands of SAR images. Adding DEM and Height Above Nearest Drainage (HAND) model data layers improved performance for models trained on 10 m datasets. The results also suggested that CNN-based semantic segmentation may fail to correctly classify pixels around narrow river channels. Furthermore, our findings revealed that it is necessary to differentiate permanent water and flood pixels because they behave differently. Finally, the results indicated that using pre-trained weights from a coarse dataset can significantly minimize initial training loss on finer datasets and speed up convergence.}, } @article {pmid36687286, year = {2023}, author = {Ali, O and AlAhmad, A and Kahtan, H}, title = {A review of advanced technologies available to improve the healthcare performance during COVID-19 pandemic.}, journal = {Procedia computer science}, volume = {217}, number = {}, pages = {205-216}, pmid = {36687286}, issn = {1877-0509}, abstract = {Information technology (IT) has enabled the initiation of an innovative healthcare system. An innovative healthcare system integrates new technologies such as cloud computing, the internet of things, and artificial intelligence (AI) to transform healthcare to be more efficient, more convenient and more personalized. This review aims to identify the key technologies that will help to support an innovative healthcare system.
A case study approach was used in this research analysis to enable a researcher to closely analyze the data in a particular context. It presents a case study of the coronavirus (COVID-19) as a means of exploring the use of advanced technologies in an innovative healthcare system to help address a worldwide health crisis. An innovative healthcare system can help to promote better patient self-management, reduce costs, relieve staff pressures, help with resource and knowledge management, and improve the patient experience. An innovative healthcare system can reduce the expense and time for research, and increase the overall efficacy of the research. Overall, this research identifies how innovative technologies can improve the performance of the healthcare system. Advanced technologies can assist with pandemic control and can help in the recognition of the virus, clinical treatment, medical protection, intelligent diagnosis, and outbreak analysis. The review provides an analysis of the future prospects of an innovative healthcare system.}, } @article {pmid36685273, year = {2023}, author = {Yilmaz, OS and Acar, U and Sanli, FB and Gulgen, F and Ates, AM}, title = {Mapping burn severity and monitoring CO content in Türkiye's 2021 Wildfires, using Sentinel-2 and Sentinel-5P satellite data on the GEE platform.}, journal = {Earth science informatics}, volume = {16}, number = {1}, pages = {221-240}, pmid = {36685273}, issn = {1865-0473}, abstract = {This study investigated forest fires in the Mediterranean region of Türkiye between July 28, 2021, and August 11, 2021. Burn severity maps were produced with the difference normalised burned ratio index (dNBR) and difference normalised difference vegetation index (dNDVI) using Sentinel-2 images on the Google Earth Engine (GEE) cloud platform. The burned areas were estimated based on the determined burning severity degrees. Vegetation density losses in burned areas were analysed using the normalised difference vegetation index (NDVI) time series. At the same time, the post-fire Carbon Monoxide (CO) column number densities were determined using the Sentinel-5P satellite data. According to the burn severity maps obtained with dNBR, the sum of high and moderate severity areas constitutes 34.64%, 20.57%, 46.43%, 51.50% and 18.88% of the entire area in Manavgat, Gündoğmuş, Marmaris, Bodrum and Köyceğiz districts, respectively. Likewise, according to the burn severity maps obtained with dNDVI, the sum of the areas of very high severity and high severity constitutes 41.17%, 30.16%, 30.50%, 42.35%, and 10.40% of the entire region, respectively. In post-fire NDVI time series analyses, sharp decreases were observed in NDVI values from 0.8 to 0.1 in all burned areas. While the tropospheric CO column number density was 0.03 mol/m² in all burned regions before the fire, it was observed that this value increased to 0.14 mol/m² after the fire. Moreover, when the area was examined more broadly with Sentinel-5P data, it was observed that the amount of CO increased up to a maximum value of 0.333 mol/m². The results of this study present significant information in terms of determining the severity of forest fires in the Mediterranean region in 2021 and the determination of the CO column number density after the fire.
In addition, monitoring polluting gases with RS techniques after forest fires is essential in understanding the extent of the damage they can cause to the environment.}, } @article {pmid36679810, year = {2023}, author = {Yang, H and Zhou, H and Liu, Z and Deng, X}, title = {Energy Optimization of Wireless Sensor Embedded Cloud Computing Data Monitoring System in 6G Environment.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {2}, pages = {}, pmid = {36679810}, issn = {1424-8220}, support = {110822150//Southwest Forestry University/ ; 194309//Southwest Forestry University/ ; 111022001//Southwest Forestry University/ ; }, abstract = {With the construction and development of modern and smart cities, people's lives are becoming more intelligent and diversified. Surveillance systems increasingly play an active role in target tracking, vehicle identification, traffic management, etc. In the 6G network environment, faced with the massive, large-scale data in the monitoring system, it is difficult for an ordinary processing platform to meet the computing demand. This paper provides a data governance solution based on a 6G environment. The shortcomings of critical technologies in wireless sensor networks are tackled through ZigBee energy optimization to address the shortage of energy supply and high energy consumption in the practical application of wireless sensor networks. At the same time, this improved routing algorithm is combined with embedded cloud computing to optimize the monitoring system and achieve efficient data processing. The ZigBee-optimized wireless sensor network consumes less energy in practice and also increases the service life of the network, as proven by research and experiments. This optimized data monitoring system ensures data security and reliability.}, } @article {pmid36679800, year = {2023}, author = {Oztoprak, K and Tuncel, YK and Butun, I}, title = {Technological Transformation of Telco Operators towards Seamless IoT Edge-Cloud Continuum.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {2}, pages = {}, pmid = {36679800}, issn = {1424-8220}, abstract = {This article investigates and discusses challenges in the telecommunication field from multiple perspectives; both academic and industry sides are catered for. It surveys the main points of technological transformation toward the edge-cloud continuum from the view of a telco operator to show the complete picture, including the evolution of cloud-native computing, Software-Defined Networking (SDN), and network automation platforms. The cultural shift in software development and management with DevOps enabled the development of significant technologies in the telecommunication world, including network equipment, application development, and system orchestration. The effect of the aforementioned cultural shift on the application area, especially from the IoT point of view, is investigated. The enormous change in service diversity and delivery capabilities to mass devices is also discussed. During the last two decades, desktop and server virtualization has played an active role in the Information Technology (IT) world. With the use of OpenFlow, SDN, and Network Functions Virtualization (NFV), the network revolution got underway. The shift from monolithic application development and deployment to micro-services changed the whole picture.
On the other hand, data centers have evolved over several generations, to the point where the control plane cannot cope with all the networks without an intelligent decision-making process benefiting from AI/ML techniques. AI also enables operators to forecast demand more accurately, anticipate network load, and adjust capacity and throughput automatically. Going one step further, zero-touch networking and service management (ZSM) is proposed to take high-level human intents and generate low-level configurations for network elements with validated results, minimizing the ratio of faults caused by human intervention. Harmonizing all of this progress across different communication technologies enabled the successful use of edge computing. Low-powered (from both energy and processing perspectives) IoT networks have disrupted the customer and end-point demands within the sector and, as such, paved the path towards devising the edge computing concept, which finalized the whole picture of the edge-cloud continuum.}, } @article {pmid36679795, year = {2023}, author = {Yin, HC and Lien, JJ}, title = {Cascaded Segmentation U-Net for Quality Evaluation of Scraping Workpiece.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {2}, pages = {}, pmid = {36679795}, issn = {1424-8220}, mesh = {*Algorithms ; Cloud Computing ; *Data Compression ; Industry ; Judgment ; Image Processing, Computer-Assisted ; }, abstract = {In terms of industry, the hand-scraping method is a key technology for achieving high precision in machine tools, and the quality of scraping workpieces directly affects the accuracy and service life of the machine tool. However, most of the quality evaluation of the scraping workpieces is carried out by the scraping worker's subjective judgment, which results in differences in the quality of the scraping workpieces and is time-consuming. Hence, in this research, an edge-cloud computing system was developed to obtain the relevant parameters, which are the percentage of point (POP) and the peak point per square inch (PPI), for evaluating the quality of scraping workpieces. On the cloud computing server-side, a novel network called cascaded segmentation U-Net is proposed to produce high-quality segmentation of the height of points (HOP) (around 40 μm in height), favoring training on small datasets, and then a post-processing algorithm automatically calculates POP and PPI. This research instead emphasizes the architecture of the network itself. The design of the components of our network is based on the basic idea of the identity function, which not only solves the problem of the misjudgment of the oil ditch and the residual pigment but also allows the network to be trained effectively end to end. At the head of the network, a cascaded multi-stage pixel-wise classification is designed for obtaining more accurate HOP borders. Furthermore, the "Cross-dimension Compression" stage is used to fuse high-dimensional semantic feature maps across the depth of the feature maps into low-dimensional feature maps, producing decipherable content for final pixel-wise classification. Our system can achieve an error rate of 3.7% and 0.9 points for POP and PPI.
The novel network achieves an Intersection over Union (IoU) of 90.2%.}, } @article {pmid36679792, year = {2023}, author = {Kopras, B and Idzikowski, F and Bossy, B and Kryszkiewicz, P and Bogucka, H}, title = {Communication and Computing Task Allocation for Energy-Efficient Fog Networks.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {2}, pages = {}, pmid = {36679792}, issn = {1424-8220}, support = {Bailout supporting development of young scientists in 2021/22 within task "Optimization 402 of the operation of wireless networks and compression of test data"//Polish Ministry of Education and Science/ ; 2021/41/N/ST7/03941 on "Fresh and Green Cellular IoT Edge Computing Networks - FitNets"//National Science Centre, Poland/ ; }, mesh = {*Communication ; *Algorithms ; Cloud Computing ; Heuristics ; }, abstract = {The well-known cloud computing paradigm is being extended by the idea of fog, with computing nodes placed closer to end users to allow for task processing with tighter latency requirements. However, offloading of tasks (from end devices to either the cloud or to the fog nodes) should be designed taking energy consumption for both transmission and computation into account. The task allocation procedure can be challenging considering the high number of arriving tasks with various computational, communication and delay requirements, and the high number of computing nodes with various communication and computing capabilities. In this paper, we propose an optimal task allocation procedure, minimizing consumed energy for a set of users connected wirelessly to a network composed of fog nodes (FNs) located at access points (APs) and cloud nodes (CNs). We optimize the assignment of APs and computing nodes to offloaded tasks as well as the operating frequencies of FNs. The considered problem is formulated as a Mixed-Integer Nonlinear Programming problem. The utilized energy consumption and delay models as well as their parameters, related to both the computation and communication costs, reflect the characteristics of real devices. The obtained results show that it is profitable to split the processing of tasks between multiple FNs and the cloud, often choosing different nodes for transmission and computation. The proposed algorithm manages to find the optimal allocations and outperforms all the considered alternative allocation strategies resulting in the lowest energy consumption and task rejection rate. Moreover, a heuristic algorithm that decouples the optimization of wireless transmission from implemented computations and wired transmission is proposed. It finds the optimal or close-to-optimal solutions for all of the studied scenarios.}, } @article {pmid36679619, year = {2023}, author = {Mirza, IB and Georgakopoulos, D and Yavari, A}, title = {Cyber-Physical-Social Awareness Platform for Comprehensive Situation Awareness.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {2}, pages = {}, pmid = {36679619}, issn = {1424-8220}, abstract = {A cyber-physical-social computing system integrates the interactions between cyber, physical, and social spaces by fusing information from these spaces. The result of this fusion can be used to drive many applications in areas such as intelligent transportation, smart cities, and healthcare. Situation Awareness was initially used in military services to provide knowledge of what is happening in a combat zone but has been used in many other areas such as disaster mitigation.
Various applications have been developed to provide situation awareness using either IoT sensors or social media information spaces and, more recently, using both IoT sensors and social media information spaces. The information from these spaces is heterogeneous and, at their intersection, is sparse. In this paper, we propose a highly scalable, novel Cyber-physical-social Awareness (CPSA) platform that provides situation awareness by using and intersecting information from both IoT sensors and social media. By combining and fusing information from both social media and IoT sensors, the CPSA platform provides more comprehensive and accurate situation awareness than existing solutions that rely on data from social media or IoT sensors alone. The CPSA platform achieves this by semantically describing and integrating the information extracted from sensors and social media spaces and by intersecting this information to enrich situation awareness. The CPSA platform uses user-provided situation models to refine and intersect cyber, physical, and social information. The CPSA platform analyses social media and IoT data using pretrained machine learning models deployed in the cloud, and provides coordination between information sources and fault tolerance. The paper describes the implementation and evaluation of the CPSA platform. The CPSA platform is evaluated in terms of capabilities such as the ability to semantically describe and integrate heterogeneous information, fault tolerance, and time constraints such as processing time and throughput in real-world experiments. The evaluation shows that the CPSA platform can reliably process and intersect large volumes of IoT sensor and social media data to provide enhanced situation awareness.}, } @article {pmid36679524, year = {2023}, author = {Chen, J and Zhou, J and Liu, L and Shu, C and Shen, M and Yao, W}, title = {Sow Farrowing Early Warning and Supervision for Embedded Board Implementations.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {2}, pages = {}, pmid = {36679524}, issn = {1424-8220}, support = {32272929//National Natural Science Foundation of China/ ; KYCYXT2022019//Fundamental Research Funds for the Central Universities/ ; }, mesh = {Animals ; Swine ; Humans ; Animals, Newborn ; *Breeding ; }, abstract = {Sow farrowing is an important part of pig breeding. The accurate and effective early warning of sow behaviors in farrowing helps breeders determine whether it is necessary to intervene with the farrowing process in a timely manner and is thus essential for increasing the survival rate of piglets and the profits of pig farms. For large pig farms, human resources and costs are important considerations in farrowing supervision. The existing method, which uses cloud computing-based deep learning to supervise sow farrowing, has a high equipment cost and requires uploading all data to a cloud data center, requiring a large network bandwidth. Thus, this paper proposes an approach for the early warning and supervision of farrowing behaviors based on the embedded artificial-intelligence computing platform (NVIDIA Jetson Nano). This lightweight deep learning method allows the rapid processing of sow farrowing video data at edge nodes, reducing the bandwidth requirement and ensuring data security in the network transmission.
Experiments indicated that after the model was migrated to the Jetson Nano, its precision for detecting sow postures and newborn piglets was 93.5%, with a recall rate of 92.2%, and the detection speed increased by a factor of more than 8. Early warning was tested on 18 sows approaching farrowing (within 5 h); the mean warning error was 1.02 h.}, } @article {pmid36679463, year = {2023}, author = {Hussain, MM and Azar, AT and Ahmed, R and Umar Amin, S and Qureshi, B and Dinesh Reddy, V and Alam, I and Khan, ZI}, title = {SONG: A Multi-Objective Evolutionary Algorithm for Delay and Energy Aware Facility Location in Vehicular Fog Networks.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {2}, pages = {}, pmid = {36679463}, issn = {1424-8220}, support = {TBA//Prince Sultan University/ ; }, mesh = {*Algorithms ; *Transportation ; Physical Phenomena ; Biological Evolution ; }, abstract = {With the emergence of delay- and energy-critical vehicular applications, forwarding sense-actuate data from vehicles to the cloud became practically infeasible. Therefore, a new computational model called Vehicular Fog Computing (VFC) was proposed. It offloads the computation workload from passenger devices (PDs) to transportation infrastructures such as roadside units (RSUs) and base stations (BSs), called static fog nodes. It can also exploit the underutilized computation resources of nearby vehicles that can act as vehicular fog nodes (VFNs) and provide delay- and energy-aware computing services. However, the capacity planning and dimensioning of VFC, which come under a class of facility location problems (FLPs), is a challenging issue. The complexity arises from the spatio-temporal dynamics of vehicular traffic, varying resource demand from PD applications, and the mobility of VFNs. This paper proposes a multi-objective optimization model to investigate the facility location in VFC networks. The solutions to this model generate optimal VFC topologies pertaining to an optimized trade-off (Pareto front) between the service delay and energy consumption. Thus, to solve this model, we propose a hybrid Evolutionary Multi-Objective (EMO) algorithm called Swarm Optimized Non-dominated sorting Genetic algorithm (SONG). It combines the convergence and search efficiency of two popular EMO algorithms: the Non-dominated Sorting Genetic Algorithm (NSGA-II) and Speed-constrained Particle Swarm Optimization (SMPSO). First, we solve an example problem using the SONG algorithm to illustrate the delay-energy solution frontiers and plot the corresponding layout topology. Subsequently, we evaluate the evolutionary performance of the SONG algorithm on real-world vehicular traces against three quality indicators: Hyper-Volume (HV), Inverted Generational Distance (IGD) and CPU delay gap.
The empirical results show that SONG exhibits improved solution quality over the NSGA-II and SMPSO algorithms and hence can be utilized as a potential tool by service providers for the planning and design of VFC networks.}, } @article {pmid36679436, year = {2023}, author = {Gec, S and Stankovski, V and Lavbič, D and Kochovski, P}, title = {A Recommender System for Robust Smart Contract Template Classification.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {2}, pages = {}, pmid = {36679436}, issn = {1424-8220}, support = {957338//European Commission/ ; P2-0426//Research Agency of the Republic of Slovenia/ ; }, mesh = {*Ecosystem ; *Algorithms ; Cloud Computing ; Documentation ; Trust ; }, abstract = {IoT environments are becoming increasingly heterogeneous in terms of their distributions and included entities by collaboratively involving not only data centers known from Cloud computing but also the different types of third-party entities that can provide computing resources. To transparently provide such resources and facilitate trust between the involved entities, it is necessary to develop and implement smart contracts. However, when developing smart contracts, developers face many challenges and concerns, such as security, contracts' correctness, a lack of documentation and/or design patterns, and others. To address this problem, we propose a new recommender system to facilitate the development and implementation of low-cost EVM-enabled smart contracts. The recommender system's algorithm provides the smart contract developer with smart contract templates that match their requirements and that are relevant to the typology of the fog architecture. It mainly relies on OpenZeppelin, a modular, reusable, and secure smart contract library that we use when classifying the smart contracts. The evaluation results indicate that by using our solution, smart contract development times are reduced overall. Moreover, such smart contracts are sustainable for fog-computing IoT environments and applications in low-cost EVM-based ledgers. The recommender system has been successfully implemented in the ONTOCHAIN ecosystem, thus presenting its applicability.}, } @article {pmid36679409, year = {2023}, author = {Sakaguchi, Y and Bakibillah, ASM and Kamal, MAS and Yamada, K}, title = {A Cyber-Physical Framework for Optimal Coordination of Connected and Automated Vehicles on Multi-Lane Freeways.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {2}, pages = {}, pmid = {36679409}, issn = {1424-8220}, support = {Grant-in-Aids for Scientific Research (C) 20K04531//Japan Society for the Promotion of Science/ ; }, abstract = {Uncoordinated driving behavior is one of the main reasons for bottlenecks on freeways. This paper presents a novel cyber-physical framework for optimal coordination of connected and automated vehicles (CAVs) on multi-lane freeways. We consider that all vehicles are connected to a cloud-based computing framework, where a traffic coordination system optimizes the target trajectories of individual vehicles for smooth and safe lane changing or merging. In the proposed framework, the vehicles are coordinated into groups or platoons, and their trajectories are successively optimized in a receding horizon control (RHC) approach. Optimization of the traffic coordination system aims to provide sufficient gaps when a lane change is necessary while minimizing the speed deviation and acceleration of all vehicles.
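The receding-horizon loop at the heart of such coordination can be sketched compactly. The toy Python example below (hypothetical weights, dynamics, and candidate accelerations, not the paper's controller) re-plans at every step, applies only the first move, and repeats:

```python
# Toy receding-horizon control (RHC) loop: at each step, evaluate a small
# set of candidate accelerations over a short horizon, apply only the first
# move, then re-plan. Weights, dynamics, and targets are hypothetical.

DT, HORIZON = 0.5, 5                      # step size [s], lookahead steps
ACCELS = [-2.0, -1.0, 0.0, 1.0, 2.0]      # candidate accelerations [m/s^2]
V_TARGET = 25.0                           # desired speed [m/s]
W_SPEED, W_ACCEL = 1.0, 0.5               # cost weights

def horizon_cost(v, a):
    """Cost of holding acceleration a for HORIZON steps from speed v."""
    cost = 0.0
    for _ in range(HORIZON):
        v += a * DT
        cost += W_SPEED * (v - V_TARGET) ** 2 + W_ACCEL * a ** 2
    return cost

v = 18.0
for step in range(10):
    a = min(ACCELS, key=lambda cand: horizon_cost(v, cand))  # plan
    v += a * DT                                              # apply first move
    print(f"step {step}: a = {a:+.1f} m/s^2, v = {v:.1f} m/s")
```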
The coordination information is then provided to individual vehicles equipped with local controllers, and each vehicle decides its control acceleration to follow the target trajectories while ensuring a safe distance. Our proposed method guarantees fast optimization and can be used in real time. The proposed coordination system was evaluated using microscopic traffic simulations and benchmarked against the traditional (human-based) driving system. The results show significant improvement in fuel economy, average velocity, and travel time for various traffic volumes.}, } @article {pmid36679360, year = {2023}, author = {Khan, AQ and Nikolov, N and Matskin, M and Prodan, R and Roman, D and Sahin, B and Bussler, C and Soylu, A}, title = {Smart Data Placement Using Storage-as-a-Service Model for Big Data Pipelines.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {2}, pages = {}, pmid = {36679360}, issn = {1424-8220}, mesh = {*Algorithms ; *Big Data ; Software ; Computers ; Computer Security ; }, abstract = {Big data pipelines are developed to process data characterized by one or more of the three big data features, commonly known as the three Vs (volume, velocity, and variety), through a series of steps (e.g., extract, transform, and move), laying the groundwork for the use of advanced analytics and ML/AI techniques. The computing continuum (i.e., cloud/fog/edge) allows access to a virtually infinite amount of resources, where data pipelines could be executed at scale; however, the implementation of data pipelines on the continuum is a complex task that needs to take computing resources, data transmission channels, triggers, data transfer methods, integration of message queues, etc., into account. The task becomes even more challenging when data storage is considered as part of the data pipelines. Local storage is expensive, hard to maintain, and comes with several challenges (e.g., data availability, data security, and backup). The use of cloud storage, i.e., storage-as-a-service (StaaS), instead of local storage has the potential of providing more flexibility in terms of scalability, fault tolerance, and availability. In this article, we propose a generic approach to integrate StaaS with data pipelines, i.e., computation on an on-premise server or on a specific cloud, but integration with StaaS, and develop a ranking method for available storage options based on five key parameters: cost, proximity, network performance, server-side encryption, and user weights/preferences. The evaluation carried out demonstrates the effectiveness of the proposed approach in terms of data transfer performance, utility of the individual parameters, and feasibility of dynamic selection of a storage option based on four primary user scenarios.}, } @article {pmid36673212, year = {2022}, author = {Gavreev, MA and Kiktenko, EO and Mastiukova, AS and Fedorov, AK}, title = {Suppressing Decoherence in Quantum State Transfer with Unitary Operations.}, journal = {Entropy (Basel, Switzerland)}, volume = {25}, number = {1}, pages = {}, pmid = {36673212}, issn = {1099-4300}, abstract = {Decoherence is the fundamental obstacle limiting the performance of quantum information processing devices. The problem of transmitting a quantum state (known or unknown) from one place to another is of great interest in this context.
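Returning briefly to the StaaS storage-ranking approach above: one simple way to realize a ranking over the five parameters is a normalized weighted score, as in the hypothetical Python sketch below. The option data, weights, and scoring formula are illustrative assumptions, not the authors' method.

```python
# Hypothetical weighted ranking of storage options. Scores are normalized so
# that higher is better, then combined with user-supplied weights.

OPTIONS = {
    # name: (cost_usd_per_gb, proximity_km, net_mbps, sse_supported)
    "provider_a": (0.023, 120.0, 900.0, True),
    "provider_b": (0.018, 800.0, 400.0, True),
    "on_prem":    (0.040,   1.0, 1000.0, False),
}
WEIGHTS = {"cost": 0.35, "proximity": 0.2, "network": 0.3, "sse": 0.15}

def score(cost, prox, net, sse):
    # Lower cost/proximity are better; higher throughput is better;
    # server-side encryption (SSE) support is a bonus flag.
    costs = [o[0] for o in OPTIONS.values()]
    proxs = [o[1] for o in OPTIONS.values()]
    nets = [o[2] for o in OPTIONS.values()]
    return (WEIGHTS["cost"] * (min(costs) / cost)
            + WEIGHTS["proximity"] * (min(proxs) / prox)
            + WEIGHTS["network"] * (net / max(nets))
            + WEIGHTS["sse"] * (1.0 if sse else 0.0))

ranking = sorted(OPTIONS, key=lambda n: score(*OPTIONS[n]), reverse=True)
print(ranking)   # best-ranked storage option first
```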
In this work, following a recent theoretical proposal, we study an application of quantum state-dependent pre- and post-processing unitary operations for protecting a given (multi-qubit) quantum state against the effect of decoherence acting on all qubits. We observe an increase in the fidelity of the output quantum state both in a quantum emulation experiment, where all protecting unitaries are perfect, and in a real experiment with a cloud-accessible quantum processor, where the protecting unitaries themselves are affected by noise. We expect the considered approach to be useful for analyzing the capabilities of quantum information processing devices in transmitting known quantum states. We also demonstrate the applicability of the developed approach for suppressing decoherence in the process of distributing a two-qubit state over remote physical qubits of a quantum processor.}, } @article {pmid36670240, year = {2023}, author = {Yıldırım, E and Cicioğlu, M and Çalhan, A}, title = {Fog-cloud architecture-driven Internet of Medical Things framework for healthcare monitoring.}, journal = {Medical & biological engineering & computing}, volume = {61}, number = {5}, pages = {1133-1147}, pmid = {36670240}, issn = {1741-0444}, mesh = {Humans ; *COVID-19 ; Internet ; Algorithms ; Cloud Computing ; Communication ; }, abstract = {The new coronavirus disease (COVID-19) has increased the need for new technologies such as the Internet of Medical Things (IoMT), Wireless Body Area Networks (WBANs), and cloud computing in the health sector as well as in many other areas. These technologies have also made it possible for billions of devices to connect to the internet and communicate with each other. In this study, an Internet of Medical Things (IoMT) framework consisting of Wireless Body Area Networks (WBANs) has been designed, and the health big data from the WBANs have been analyzed using fog and cloud computing technologies. Fog computing is used for fast and easy analysis, and cloud computing is used for time-consuming and complex analysis. The proposed IoMT framework is presented with a diabetes prediction scenario. The diabetes prediction process is carried out in the fog with fuzzy logic decision-making and in the cloud with support vector machine (SVM), random forest (RF), and artificial neural network (ANN) machine learning algorithms. The dataset produced in the WBANs is used for big data analysis in the scenario for both the fuzzy logic and the machine learning algorithms. Fuzzy logic gives 64% accuracy in the fog, while SVM, RF, and ANN achieve 89.5%, 88.4%, and 87.2% accuracy, respectively, in the cloud for diabetes prediction. In addition, the throughput and delay results of heterogeneous nodes with different priorities in the WBAN scenario, created using the IEEE 802.15.6 standard and the AODV routing protocol, have also been analyzed. Highlights of the fog-cloud architecture-driven IoMT framework: • An IoMT framework is designed with important components and functions such as fog and cloud node capabilities. • Real-time data has been obtained from WBANs in Riverbed Modeler for a more realistic performance analysis of IoMT. • Fuzzy logic and machine learning algorithms (RF, SVM, and ANN) are used for diabetes predictions.
• Intra- and inter-WBAN communications (IEEE 802.15.6 standard) are modeled as essential components of the IoMT framework with all functions.}, } @article {pmid36658205, year = {2023}, author = {Kazemi Garajeh, M and Salmani, B and Zare Naghadehi, S and Valipoori Goodarzi, H and Khasraei, A}, title = {An integrated approach of remote sensing and geospatial analysis for modeling and predicting the impacts of climate change on food security.}, journal = {Scientific reports}, volume = {13}, number = {1}, pages = {1057}, pmid = {36658205}, issn = {2045-2322}, mesh = {Humans ; *Climate Change ; *Remote Sensing Technology ; Soil ; Agriculture/methods ; Food Security ; }, abstract = {The agriculture sector provides the majority of food supplies, ensures food security, and promotes sustainable development. Due to recent climate changes as well as trends in human population growth and environmental degradation, the need for timely agricultural information continues to rise. This study analyzes and predicts the impacts of climate change on food security (FS). For 2002-2021, Landsat and MODIS satellite images and predisposing variables (land surface temperature (LST), evapotranspiration, precipitation, sunny days, cloud ratio, soil salinity, soil moisture, groundwater quality, soil types, digital elevation model, slope, and aspect) were used. First, we used a deep learning convolutional neural network (DL-CNN) based on the Google Earth Engine (GEE) to detect agricultural land (AL). A remote sensing-based approach combined with the analytical network process (ANP) model was used to identify frost-affected areas. We then analyzed the relationship between climatic, geospatial, and topographical variables and AL and frost-affected areas. We found negative correlations of -0.80, -0.58, -0.43, and -0.45 between AL and LST, evapotranspiration, cloud ratio, and soil salinity, respectively. There is a positive correlation between AL and precipitation, sunny days, soil moisture, and groundwater quality of 0.39, 0.25, 0.21, and 0.77, respectively. The correlations between frost-affected areas and LST, evapotranspiration, cloud ratio, elevation, slope, and aspect are 0.55, 0.40, 0.52, 0.35, 0.45, and 0.39. Frost-affected areas have negative correlations with precipitation, sunny days, and soil moisture of -0.68, -0.23, and -0.38, respectively. Our findings show that the increase in LST, evapotranspiration, cloud ratio, and soil salinity is associated with a decrease in AL. Additionally, AL decreases with decreases in precipitation, sunny days, soil moisture, and groundwater quality. It was also found that as LST, evapotranspiration, cloud ratio, elevation, slope, and aspect increase, frost-affected areas increase as well. Furthermore, frost-affected areas increase when precipitation, sunny days, and soil moisture decrease. Finally, we predicted the FS threat for 2030, 2040, 2050, and 2060 using the CA-Markov method. According to the results, the AL will decrease by 0.36% from 2030 to 2060. Between 2030 and 2060, however, the area with very high frost impact will increase by about 10.64%. In sum, this study accentuates the critical impacts of climate change on the FS in the region.
Our findings and proposed methods could be helpful for researchers to model and quantify the climate change impacts on the FS in different regions and periods.}, } @article {pmid36658166, year = {2023}, author = {Tsakanikas, V and Dagiuklas, T and Iqbal, M and Wang, X and Mumtaz, S}, title = {An intelligent model for supporting edge migration for virtual function chains in next generation internet of things.}, journal = {Scientific reports}, volume = {13}, number = {1}, pages = {1063}, pmid = {36658166}, issn = {2045-2322}, abstract = {Developments in next-generation IoT sensing devices, with advances in their low-power computational capabilities and high-speed networking, have led to the introduction of the edge computing paradigm. Within an edge cloud environment, services may generate and consume data locally, without involving cloud computing infrastructures. Aiming to tackle the low computational resources of IoT nodes, the Virtual-Function-Chain has been proposed as an intelligent distribution model for exploiting the maximum computational power at the edge, thus enabling the support of demanding services. An intelligent migration model with the capacity to support Virtual-Function-Chains is introduced in this work. According to this model, migration at the edge can support individual features of a Virtual-Function-Chain. First, auto-healing can be implemented with cold migrations if a Virtual Function fails unexpectedly. Second, a Quality of Service monitoring model can trigger live migrations, aiming to avoid overloading edge devices. The evaluation studies of the proposed model revealed that it has the capacity to increase the robustness of an edge-based service on low-powered IoT devices. Finally, comparison with similar frameworks, like Kubernetes, showed that the migration model can effectively react to edge network fluctuations.}, } @article {pmid36654019, year = {2022}, author = {Yin, Y and Wang, Z and Zhou, W and Gan, Y and Zhang, Y}, title = {Group key agreement protocol for edge computing in industrial internet.}, journal = {Mathematical biosciences and engineering : MBE}, volume = {19}, number = {12}, pages = {12730-12743}, doi = {10.3934/mbe.2022594}, pmid = {36654019}, issn = {1551-0018}, mesh = {*Computer Security ; Cloud Computing ; Internet ; *Outsourced Services ; Communication ; }, abstract = {Industrial internet security is a critical component of cyberspace safety. Furthermore, the encryption protocol is a critical component of cyberspace security. Due to the rapid development of the industrial internet and edge computing, more and more devices are outsourcing their data to cloud servers to save costs. Edge devices should have a secure session key to reduce communication costs and share information. However, most key generation and storage is handled by a centralized third-party organization, which carries some security risks. In this context, this paper proposes a lightweight multi-dimensional virtual iteration of the group key agreement protocol. The group key agreement protocol allows for one-at-a-time encryption and timely key updates without the involvement of a trusted third party, and each device in the network can agree on a large number of keys.
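For orientation, key agreement without a trusted third party can be illustrated with a textbook multi-party Diffie-Hellman chain. The Python sketch below is a generic illustration with toy parameters, not the lightweight multi-dimensional protocol proposed in that paper.

```python
# Textbook multi-party Diffie-Hellman chain, shown only to illustrate group
# key agreement without a trusted third party. This is NOT the paper's
# protocol, and the parameters are toy values, not secure choices.

import secrets

P = 2**127 - 1   # toy prime modulus (a Mersenne prime); far too small for real use
G = 3            # toy public base

device_secrets = [secrets.randbelow(P - 2) + 2 for _ in range(3)]  # a, b, c

# Algebraically the shared key is G^(a*b*c) mod P. In a real protocol the
# intermediate values are passed from device to device so that no secret
# ever leaves its owner; here the chain is collapsed into one loop.
key = G
for s in device_secrets:
    key = pow(key, s, P)

print("shared group key:", hex(key))
```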
Analysis shows that the protocol provides high security, rapid computation, and a small storage footprint.}, } @article {pmid36648445, year = {2023}, author = {Deutsch, EW and Mendoza, L and Shteynberg, DD and Hoopmann, MR and Sun, Z and Eng, JK and Moritz, RL}, title = {Trans-Proteomic Pipeline: Robust Mass Spectrometry-Based Proteomics Data Analysis Suite.}, journal = {Journal of proteome research}, volume = {22}, number = {2}, pages = {615-624}, pmid = {36648445}, issn = {1535-3907}, support = {R24 GM127667/GM/NIGMS NIH HHS/United States ; U19 AG023122/AG/NIA NIH HHS/United States ; R01 HL133135/HL/NHLBI NIH HHS/United States ; R01 GM087221/GM/NIGMS NIH HHS/United States ; S10 OD026936/OD/NIH HHS/United States ; }, mesh = {*Proteomics/methods ; *Software ; Mass Spectrometry ; Probability ; Data Analysis ; }, abstract = {The Trans-Proteomic Pipeline (TPP) mass spectrometry data analysis suite has been in continual development and refinement since its first tools, PeptideProphet and ProteinProphet, were published 20 years ago. The current release provides a large complement of tools for spectrum processing, spectrum searching, search validation, abundance computation, protein inference, and more. Many of the tools include machine-learning modeling to extract the most information from data sets and build robust statistical models to compute the probabilities that derived information is correct. Here we present the latest information on the many TPP tools, and how TPP can be deployed on various platforms from personal Windows laptops to Linux clusters and expansive cloud computing environments. We describe tutorials on how to use TPP in a variety of ways and describe synergistic projects that leverage TPP. We conclude with plans for continued development of TPP.}, } @article {pmid36645733, year = {2023}, author = {Yazdani, A and Dashti, SF and Safdari, Y}, title = {A fog-assisted information model based on priority queue and clinical decision support systems.}, journal = {Health informatics journal}, volume = {29}, number = {1}, pages = {14604582231152792}, doi = {10.1177/14604582231152792}, pmid = {36645733}, issn = {1741-2811}, mesh = {Humans ; Cloud Computing ; *Decision Support Systems, Clinical ; *Telemedicine ; }, abstract = {OBJECTIVES: Telehealth monitoring applications are latency-sensitive. The current fog-based telehealth monitoring models are mainly focused on the role of fog computing in improving response time and latency. In this paper, we have introduced a new service called "priority queue" in the fog layer, which is programmed to prioritize the events sent by different sources in different environments to assist the cloud layer with reducing response time and latency.
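A fog-layer priority queue of this kind is easy to sketch with Python's standard heapq module; the event names and priority levels below are hypothetical illustrations, not the paper's implementation.

```python
# Minimal sketch of a fog-layer priority queue that forwards emergency
# events before routine telemetry. Event names/priorities are hypothetical.

import heapq
import itertools

_counter = itertools.count()   # tie-breaker keeps FIFO order within a level
queue = []

def enqueue(priority, event):
    """Lower number = more urgent (0 = emergency, 2 = routine)."""
    heapq.heappush(queue, (priority, next(_counter), event))

def dispatch():
    """Pop the most urgent pending event for processing/forwarding."""
    priority, _, event = heapq.heappop(queue)
    return priority, event

enqueue(2, "periodic glucose reading")
enqueue(0, "hypoglycemia alarm")
enqueue(1, "sensor battery low")

while queue:
    print(dispatch())   # emergency first: (0, 'hypoglycemia alarm'), ...
```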

MATERIAL AND METHODS: We analyzed the performance of the proposed model in a fog-enabled cloud environment with the iFogSim toolkit. To compare cloud and fog computing environments, three parameters, namely response time, latency, and network usage, were used. We used the Pima Indian diabetes dataset to evaluate the model.

RESULT: The fog layer proved to be very effective in improving the response time while handling emergencies using priority queues. The proposed model reduces response time by 25.8%, latency by 36.18%, bandwidth by 28.17%, and network usage time by 41.4% as compared to the cloud.

CONCLUSION: By combining priority queues and fog computing in this study, the network usage, latency time, bandwidth, and response time were significantly reduced as compared to cloud computing.}, } @article {pmid36642685, year = {2023}, author = {Akgün, FA and Fındık, Y and Solak, S and Uçar, MHB and Büyükçavuş, MH and Baykul, T}, title = {Face comparison analysis of patients with orthognathic surgery treatment using cloud computing-based face recognition application programming interfaces.}, journal = {American journal of orthodontics and dentofacial orthopedics : official publication of the American Association of Orthodontists, its constituent societies, and the American Board of Orthodontics}, volume = {163}, number = {5}, pages = {710-719}, doi = {10.1016/j.ajodo.2022.05.023}, pmid = {36642685}, issn = {1097-6752}, mesh = {Humans ; *Orthognathic Surgery ; Face ; *Facial Recognition ; Cloud Computing ; *Orthognathic Surgical Procedures ; Software ; }, abstract = {INTRODUCTION: This study aimed to investigate whether the postoperative change in patients after orthognathic surgery, whose facial aesthetics were affected, led to detectable differences using Microsoft Azure, Amazon Web Services Rekognition, and Face[++], which are commercially available face recognition systems.

METHODS: Photographs of 35 patients after orthognathic surgery were analyzed using 3 well-known cloud computing-based facial recognition application programming interfaces to compute similarity scores between preoperative and postoperative photographs. The preoperative, relaxed, smiling, profile, and semiprofile photographs of the patients were compared separately to validate the relevant application programming interfaces. Patient characteristics and type of surgery were recorded for statistical analysis. Kruskal-Wallis rank sum tests were performed to analyze the relationship between patient characteristics and similarity scores. Multiple-comparison Wilcoxon rank sum tests were performed on the statistically significant characteristics.
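For one of the three services, Amazon Rekognition, such a pre/post-operative comparison can be scripted with boto3's CompareFaces call, as sketched below. The file names, region, and threshold are illustrative assumptions, and AWS credentials are expected to be configured in the environment.

```python
# Sketch of one pre/post-operative similarity comparison with Amazon
# Rekognition's CompareFaces API (one of the three services in the study).
# File names, region, and threshold are illustrative assumptions.

import boto3

client = boto3.client("rekognition", region_name="us-east-1")

with open("preop_relaxed.jpg", "rb") as src, open("postop_relaxed.jpg", "rb") as tgt:
    response = client.compare_faces(
        SourceImage={"Bytes": src.read()},
        TargetImage={"Bytes": tgt.read()},
        SimilarityThreshold=0,   # report a score even for weak matches
    )

for match in response["FaceMatches"]:
    print(f"similarity score: {match['Similarity']:.1f}")
```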

RESULTS: The similarity scores in the Face[++] program were lower than those in Microsoft Azure and Amazon Web Services Rekognition. In addition, the similarity scores were higher in smiling photographs. A statistically significant difference was found in similarity scores between relaxed and smiling photographs across the different programs (P <0.05). For all 3 facial recognition programs, comparable similarity scores were found in all photographs taken before and after surgery across sex, type of surgery, and type of surgical approach. The type of surgery and surgical approach, sex, and amount of surgical movement did not significantly affect similarity scores in any facial recognition program (P >0.05).

CONCLUSIONS: The similarity scores between the photographs before and after orthognathic surgery were high, suggesting that the software algorithms might value measurements on the basis of upper-face landmarks more than lower-face measurements.}, } @article {pmid36641699, year = {2023}, author = {Koch, M and Arlandini, C and Antonopoulos, G and Baretta, A and Beaujean, P and Bex, GJ and Biancolini, ME and Celi, S and Costa, E and Drescher, L and Eleftheriadis, V and Fadel, NA and Fink, A and Galbiati, F and Hatzakis, I and Hompis, G and Lewandowski, N and Memmolo, A and Mensch, C and Obrist, D and Paneta, V and Papadimitroulas, P and Petropoulos, K and Porziani, S and Savvidis, G and Sethia, K and Strakos, P and Svobodova, P and Vignali, E}, title = {HPC+ in the medical field: Overview and current examples.}, journal = {Technology and health care : official journal of the European Society for Engineering and Medicine}, volume = {31}, number = {4}, pages = {1509-1523}, pmid = {36641699}, issn = {1878-7401}, mesh = {Child ; Humans ; *Computing Methodologies ; Image Processing, Computer-Assisted ; *Software ; }, abstract = {BACKGROUND: To say data is revolutionising the medical sector would be a vast understatement. The amount of medical data available today is unprecedented and has the potential to enable to date unseen forms of healthcare. To process this huge amount of data, an equally huge amount of computing power is required, which cannot be provided by regular desktop computers. These areas can be (and already are) supported by High-Performance-Computing (HPC), High-Performance Data Analytics (HPDA), and AI (together "HPC+").

OBJECTIVE: This overview article aims to show state-of-the-art examples of studies supported by the National Competence Centres (NCCs) in HPC+ within the EuroCC project, employing HPC, HPDA and AI for medical applications.

METHOD: The included studies on different applications of HPC in the medical sector were sourced from the National Competence Centres in HPC and compiled into an overview article. Methods include the application of HPC+ for medical image processing, high-performance medical and pharmaceutical data analytics, an application for pediatric dosimetry, and a cloud-based HPC platform to support systemic pulmonary shunting procedures.

RESULTS: This article showcases state-of-the-art applications and large-scale data analytics in the medical sector employing HPC+ within surgery, medical image processing in diagnostics, nutritional support of patients in hospitals, treating congenital heart diseases in children, and within basic research.

CONCLUSION: HPC+ supports scientific fields from research to industrial applications in the medical area, enabling researchers to run faster and more complex calculations, simulations, and data analyses for the direct benefit of patients, doctors, and clinicians, and acts as an accelerator for medical research.}, } @article {pmid36637558, year = {2022}, author = {Ye, W and Wang, J and Tian, H and Quan, H}, title = {Public auditing for real-time medical sensor data in cloud-assisted HealthIIoT system.}, journal = {Frontiers of optoelectronics}, volume = {15}, number = {1}, pages = {29}, pmid = {36637558}, issn = {2095-2767}, abstract = {With the advancement of the industrial internet of things (IIoT), wireless medical sensor networks (WMSNs) have been widely introduced in modern healthcare systems to collect real-time medical data from patients, which is known as HealthIIoT. Considering the limited computing and storage capabilities of lightweight HealthIIoT devices, it is necessary to upload these data to remote cloud servers for storage and maintenance. However, there are still some serious security issues in outsourcing medical sensor data to the cloud. One of the most significant challenges is how to ensure the integrity of these data, which is a prerequisite for providing precise medical diagnosis and treatment. To meet this challenge, we propose a novel and efficient public auditing scheme, which is suitable for the cloud-assisted HealthIIoT system. Specifically, to address the contradiction between the high real-time requirement of medical sensor data and the limited computing power of HealthIIoT devices, a new online/offline tag generation algorithm is designed to improve preprocessing efficiency; to protect medical data privacy, a secure hash function is employed to blind the data proof. We formally prove the security of the presented scheme, and evaluate the performance through detailed experimental comparisons with state-of-the-art schemes. The results show that the presented scheme can greatly improve the efficiency of tag generation, while achieving better auditing performance than previous schemes.}, } @article {pmid36636525, year = {2023}, author = {Wang, SH and Khan, MA and Zhu, Z and Zhang, YD}, title = {WACPN: A Neural Network for Pneumonia Diagnosis.}, journal = {Computer systems science and engineering}, volume = {45}, number = {1}, pages = {21-34}, pmid = {36636525}, issn = {2766-483X}, support = {AA/18/3/34220/BHF_/British Heart Foundation/United Kingdom ; MC_PC_17171/MRC_/Medical Research Council/United Kingdom ; }, abstract = {Community-acquired pneumonia (CAP) is considered a sort of pneumonia developed outside hospitals and clinics. To diagnose CAP more efficiently, we proposed a novel neural network model. We introduce the 2-dimensional wavelet entropy (2d-WE) layer and an adaptive chaotic particle swarm optimization (ACP) algorithm to train the feed-forward neural network. The ACP uses an adaptive inertia weight factor (AIWF) and a Rossler attractor (RA) to improve the performance of standard particle swarm optimization. The final combined model is named WE-layer ACP-based network (WACPN), which attains a sensitivity of 91.87±1.37%, a specificity of 90.70±1.19%, a precision of 91.01±1.12%, an accuracy of 91.29±1.09%, an F1 score of 91.43±1.09%, an MCC of 82.59±2.19%, and an FMI of 91.44±1.09%. The AUC of this WACPN model is 0.9577. We find that setting the maximum decomposition level to four obtains the best result.
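The adaptive-inertia ingredient of such particle swarm optimizers can be sketched generically. The Python snippet below uses a plain linearly decaying inertia weight on a toy objective; it illustrates the AIWF idea only and omits the paper's Rossler-attractor chaotic term.

```python
# Generic PSO with a linearly decaying inertia weight, illustrating only the
# adaptive-inertia (AIWF) idea on a toy objective; the paper's ACP further
# perturbs the search with a Rossler attractor, omitted here.

import random

def sphere(x):                            # toy objective to minimize
    return sum(v * v for v in x)

DIM, SWARM, ITERS = 5, 20, 100
W_MAX, W_MIN, C1, C2 = 0.9, 0.4, 2.0, 2.0

pos = [[random.uniform(-5, 5) for _ in range(DIM)] for _ in range(SWARM)]
vel = [[0.0] * DIM for _ in range(SWARM)]
pbest = [p[:] for p in pos]
gbest = min(pbest, key=sphere)

for t in range(ITERS):
    w = W_MAX - (W_MAX - W_MIN) * t / ITERS      # inertia decays over time
    for i in range(SWARM):
        for d in range(DIM):
            vel[i][d] = (w * vel[i][d]
                         + C1 * random.random() * (pbest[i][d] - pos[i][d])
                         + C2 * random.random() * (gbest[d] - pos[i][d]))
            pos[i][d] += vel[i][d]
        if sphere(pos[i]) < sphere(pbest[i]):
            pbest[i] = pos[i][:]                 # update personal best
    gbest = min(pbest + [gbest], key=sphere)     # update global best

print("best objective value found:", sphere(gbest))
```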
Experiments demonstrate the effectiveness of both AIWF and RA. Finally, the proposed WACPN is efficient in diagnosing CAP and superior to six state-of-the-art models. Our model will be distributed to the cloud computing environment.}, } @article {pmid36627353, year = {2023}, author = {Saxena, D and Singh, AK and Lee, CN and Buyya, R}, title = {A sustainable and secure load management model for green cloud data centres.}, journal = {Scientific reports}, volume = {13}, number = {1}, pages = {491}, pmid = {36627353}, issn = {2045-2322}, mesh = {*Algorithms ; *Neural Networks, Computer ; Cloud Computing ; }, abstract = {The massive upsurge in cloud resource demand and inefficient load management stave off the sustainability of Cloud Data Centres (CDCs), resulting in high energy consumption, resource contention, excessive carbon emission, and security threats. In this context, a novel Sustainable and Secure Load Management (SaS-LM) Model is proposed to enhance security for users together with sustainability for CDCs. The model estimates and reserves the required resources, viz., compute, network, and storage, and dynamically adjusts the load subject to maximum security and sustainability. An evolutionary optimization algorithm named Dual-Phase Black Hole Optimization (DPBHO) is proposed for optimizing a multi-layered feed-forward neural network and allowing the model to estimate resource usage and detect probable congestion. Further, DPBHO is extended to a multi-objective DPBHO algorithm for secure and sustainable VM allocation and management to minimize the number of active server machines, carbon emission, and resource wastage for greener CDCs. SaS-LM is implemented and evaluated using benchmark real-world Google Cluster VM traces. The proposed model is compared with state-of-the-art methods, which reveals its efficacy in terms of carbon emission and energy consumption reduced by up to 46.9% and 43.9%, respectively, with resource utilization improved by up to 16.5%.}, } @article {pmid36624887, year = {2023}, author = {Saba, T and Rehman, A and Haseeb, K and Alam, T and Jeon, G}, title = {Cloud-edge load balancing distributed protocol for IoE services using swarm intelligence.}, journal = {Cluster computing}, volume = {}, number = {}, pages = {1-11}, pmid = {36624887}, issn = {1386-7857}, abstract = {The rapid development of the Internet of Everything (IoE) and cloud services plays a vital role in the growth of smart applications. They provide scalability through the collaboration of cloud servers and cope with large amounts of collected data for network systems. Although edge computing supports efficient utilization of communication bandwidth and helps meet the latency requirements of smart embedded systems, it faces significant research issues regarding data aggregation among heterogeneous network services and objects. Moreover, distributed systems are more precise for data access and storage, so machine-to-machine communication needs to be secured against unpredictable events. As a result, this research proposes secured data management with a distributed load-balancing protocol using particle swarm optimization, which aims to decrease the response time for cloud users and effectively maintain the integrity of network communication. It combines distributed computing and shifts high-cost computations closer to the requesting node to reduce latency and transmission overhead. Moreover, the proposed work also protects the communicating machines from malicious devices by evaluating trust in a controlled manner.
Simulation results revealed that the proposed protocol outperforms other solutions by an average of 20% in energy consumption, 17% in success rate, 14% in end-to-end delay, and 19% in network cost across various performance metrics.}, } @article {pmid36624868, year = {2023}, author = {Liu, X and Gao, A and Chen, C and Moghimi, MM}, title = {Lightweight similarity checking for English literatures in mobile edge computing.}, journal = {Journal of cloud computing (Heidelberg, Germany)}, volume = {12}, number = {1}, pages = {3}, pmid = {36624868}, issn = {2192-113X}, abstract = {With the advent of the information age, mobile devices have become one of the major convenient tools aiding people's daily office activities such as academic research, one of whose major tasks is to check the repetition rate or similarity among different English literatures. Traditional literature similarity checking solutions in the cloud paradigm often call for intensive computational cost and long waiting time. To tackle this issue, in this paper, we modify the traditional literature similarity checking solution in the cloud paradigm to make it suitable for the lightweight mobile edge environment. Furthermore, we put forward a lightweight similarity checking approach, SC-MEC, for English literatures in the mobile edge computing environment. To validate the advantages of SC-MEC, we have designed extensive experiments on a dataset. The reported experimental results show that SC-MEC can deliver a satisfactory similarity checking result for literatures compared to other existing approaches.}, } @article {pmid36620727, year = {2022}, author = {Wegner, T and Lassnig, M and Ueberholz, P and Zeitnitz, C}, title = {Simulation and Evaluation of Cloud Storage Caching for Data Intensive Science.}, journal = {Computing and software for big science}, volume = {6}, number = {1}, pages = {5}, pmid = {36620727}, issn = {2510-2044}, abstract = {A common task in scientific computing is data reduction. This workflow extracts the most important information from large input data and stores it in smaller derived data objects. The derived data objects can then be used for further analysis. Typically, these workflows use distributed storage and computing resources. A straightforward setup of storage media would be low-cost tape storage and higher-cost disk storage. The large, infrequently accessed input data are stored on tape storage. The smaller, frequently accessed derived data are stored on disk storage. In a best-case scenario, the large input data are only accessed very infrequently and in a well-planned pattern. However, practice shows that often the data have to be processed continuously and unpredictably. This can significantly reduce tape storage performance. A common approach to counter this is storing copies of the large input data on disk storage. This contribution evaluates an approach that uses cloud storage resources to serve as a flexible cache or buffer, depending on the computational workflow. The proposed model is explored for the case of continuously processed data. For the evaluation, a simulation tool was developed, which can be used to analyse models related to storage and network resources. We show that using commercial cloud storage can reduce on-premises disk storage requirements, while maintaining an equal throughput of jobs.
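The cache/buffer role that cloud storage plays in such a setup can be mimicked with a small trace-driven simulation. The sketch below (hypothetical capacity, access trace, and accounting, far simpler than the authors' simulator) counts how often a bounded LRU cloud cache absorbs reads that would otherwise hit tape.

```python
# Trace-driven sketch: a bounded LRU "cloud cache" in front of slow tape
# storage. Capacity, trace, and accounting are hypothetical and far simpler
# than the storage/network simulator described in the paper.

import random
from collections import OrderedDict

CACHE_SLOTS = 4                      # cloud-cache capacity, in objects
cache = OrderedDict()                # object id -> True, ordered by recency
hits = misses = 0

random.seed(1)
# Skewed toy trace: some derived-data objects are far hotter than others.
trace = random.choices("ABCDEFG", weights=[7, 6, 5, 4, 3, 2, 1], k=1000)

for obj in trace:
    if obj in cache:
        hits += 1
        cache.move_to_end(obj)       # refresh recency on a cache hit
    else:
        misses += 1                  # miss: fetch from tape into the cache
        cache[obj] = True
        if len(cache) > CACHE_SLOTS:
            cache.popitem(last=False)  # evict the least recently used object

print(f"hit rate: {hits / len(trace):.2%} (each hit avoids a tape read)")
```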
Moreover, the key metrics of the model are discussed, and an approach is described that uses the simulation to assist with the decision process of adopting commercial cloud storage. The goal is to investigate approaches and propose new evaluation methods to overcome future data challenges.}, } @article {pmid36617078, year = {2023}, author = {Harach, T and Simonik, P and Vrtkova, A and Mrovec, T and Klein, T and Ligori, JJ and Koreny, M}, title = {Novel Method for Determining Internal Combustion Engine Dysfunctions on Platform as a Service.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {1}, pages = {}, pmid = {36617078}, issn = {1424-8220}, support = {CZ.02.1.01/0.0/0.0/17_049/0008425//A Research Platform focused on Industry 4.0 and Robotics in Ostrava Agglomeration/ ; CZ.02.1.01/0.0/0.0/16_019/0000867//European Regional Development Fund in the Research Centre of Advanced Mechatronic Systems/ ; SP2022/9//VSB - Technical University of Ostrava, Czech Republic/ ; }, mesh = {*Vehicle Emissions/analysis ; *Machine Learning ; Cloud Computing ; Gasoline/analysis ; }, abstract = {This article deals with a unique, new powertrain diagnostics platform deployed at the level of a large number of EU25 inspection stations. The implemented method uses emission measurement data and additional data from a significant sample of vehicles. An original machine learning technique is applied that uses 9 static testing points (defined by constant engine load and constant engine speed), the volume of the engine combustion chamber, the EURO emission standard category, an engine condition state coefficient, and actual mileage. An example of dysfunction detection using exhaust emission analyses is described in detail. The test setup is also described, along with the procedure for data collection using the Mindsphere cloud data processing platform. Mindsphere is the core of the new Platform as a Service (PaaS) for data processing from multiple testing facilities. A fleet-level evaluation using the quantile regression method is implemented. In this phase of the research, real data was used, as well as data defined on the basis of knowledge of the manifestation of internal combustion engine defects. As a result of the application of the platform and the evaluation method, it is possible to classify combustion engine dysfunctions. These are defects that cannot be detected by self-diagnostic procedures for cars up to the EURO 6 level.}, } @article {pmid36616922, year = {2022}, author = {Martínez-Otzeta, JM and Rodríguez-Moreno, I and Mendialdua, I and Sierra, B}, title = {RANSAC for Robotic Applications: A Survey.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {1}, pages = {}, pmid = {36616922}, issn = {1424-8220}, support = {IT1427-22//Basque Government/ ; KK-2022/00065//Basque Government/ ; FPU18/04737//Spanish Ministry of Science, Innovation and Universities/ ; PID2021-122402OB-C21//Spanish Ministry of Science, Innovation and Universities/ ; PID2021-122402OB-C21//Spanish State Research Agency/ ; PID2021-122402OB-C21//European Regional Development Fund/ ; }, mesh = {*Algorithms ; *Robotics ; Research Design ; }, abstract = {Random Sample Consensus, most commonly abbreviated as RANSAC, is a robust estimation method for the parameters of a model contaminated by a sizable percentage of outliers.
In its simplest form, the process starts with a sampling of the minimum data needed to perform an estimation, followed by an evaluation of its adequacy, and further repetitions of this process until some stopping criterion is met. Multiple variants have been proposed in which this workflow is modified, typically tweaking one or several of these steps for improvements in computing time or the quality of the estimated parameters. RANSAC is widely applied in the field of robotics, for example, for finding geometric shapes (planes, cylinders, spheres, etc.) in point clouds or for estimating the best transformation between different camera views. In this paper, we present a review of the current state of the art of RANSAC family methods with a special interest in applications in robotics.}, } @article {pmid36616830, year = {2022}, author = {Abolhassani Khajeh, S and Saberikamarposhti, M and Rahmani, AM}, title = {Real-Time Scheduling in IoT Applications: A Systematic Review.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {1}, pages = {}, pmid = {36616830}, issn = {1424-8220}, mesh = {Communication ; Internet ; *Internet of Things ; }, abstract = {The Internet of Things (IoT) is a next-generation telecommunication network that, with the rapid progress of wireless sensor network techniques, has touched many spheres of life today. Hardware, telephony, communications, storage, secure platforms, software and services, and data processing platforms are all part of the IoT environment. IoT sensors collect data from their environment and share it by connecting to the Internet gateway. These sensors often perform tasks without human intervention. This article reviews studies on real-time scheduling in the IoT published from 2018 to 2022 in order to fully understand the issues raised in this area. A classification of IoT applications based on practical application is provided for the selected studies. The selected studies cover healthcare, infrastructure, industrial applications, smart cities, commercial applications, environmental protection, and general IoT applications. Studies are sorted into groups based on related applications and compared on indicators such as performance time, energy consumption, makespan, and assessment environment, depending on the provided classification. Finally, this paper discusses the main concepts, disadvantages, advantages, and future work of all reviewed studies.}, } @article {pmid36616797, year = {2022}, author = {Bhatia, J and Italiya, K and Jadeja, K and Kumhar, M and Chauhan, U and Tanwar, S and Bhavsar, M and Sharma, R and Manea, DL and Verdes, M and Raboaca, MS}, title = {An Overview of Fog Data Analytics for IoT Applications.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {1}, pages = {}, pmid = {36616797}, issn = {1424-8220}, abstract = {With the rapid growth of data and processing in the cloud, it has become easier to access those data. On the other hand, this poses many technical and security challenges for the users of those provisions. Fog computing makes these technical issues manageable to some extent. Fog computing is one of the promising solutions for handling the big data produced by the IoT, which are often security-critical and time-sensitive. Massive IoT data analytics by a fog computing structure is emerging and requires extensive research for more proficient knowledge and smart decisions. Though big data analytics is advancing, it does not yet consider fog data analytics.
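Stepping back to the RANSAC survey above: the sample-fit-evaluate loop it summarizes can be made concrete with a minimal 2-D line-fitting example. The data and inlier threshold below are toy assumptions; robotic pipelines apply the same loop to planes or cylinders in point clouds.

```python
# Minimal RANSAC for a 2-D line y = m*x + b: sample the minimal set
# (two points), fit, count inliers, repeat, and keep the best model.

import random

random.seed(0)
# Toy point cloud: an underlying line plus gross outliers.
points = [(x, 2.0 * x + 1.0 + random.gauss(0, 0.1)) for x in range(20)]
points += [(random.uniform(0, 20), random.uniform(-30, 60)) for _ in range(8)]

def fit(p, q):
    m = (q[1] - p[1]) / (q[0] - p[0])
    return m, p[1] - m * p[0]

best_model, best_inliers = None, 0
for _ in range(200):                          # fixed iteration budget
    p, q = random.sample(points, 2)           # minimal sample
    if p[0] == q[0]:
        continue                              # degenerate vertical pair
    m, b = fit(p, q)
    inliers = sum(abs(y - (m * x + b)) < 0.5 for x, y in points)
    if inliers > best_inliers:
        best_model, best_inliers = (m, b), inliers

print(f"model ~ y = {best_model[0]:.2f}x + {best_model[1]:.2f}, "
      f"inliers: {best_inliers}/{len(points)}")
```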
However, there are many challenges, including heterogeneity, security, accessibility, resource sharing, network communication overhead, real-time processing of complex data, etc. This paper explores various research challenges and their solutions using next-generation fog data analytics and IoT networks. We also performed an experimental analysis based on fog computing and cloud architecture. The results show that fog computing outperforms the cloud in terms of network utilization and latency. Finally, the paper concludes with future trends.}, } @article {pmid36616774, year = {2022}, author = {Condon, F and Martínez, JM and Eltamaly, AM and Kim, YC and Ahmed, MA}, title = {Design and Implementation of a Cloud-IoT-Based Home Energy Management System.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {1}, pages = {}, pmid = {36616774}, issn = {1424-8220}, support = {ID11200178//Agencia Nacional de Investigación y Desarrollo/ ; 2021R1I1A305872911//National Research Foundation of Korea/ ; }, abstract = {The advances in the Internet of Things (IoT) and cloud computing have opened new opportunities for developing various smart grid applications and services. The rapidly increasing adoption of IoT devices has enabled the development of applications and solutions to manage energy consumption efficiently. This work presents the design and implementation of a home energy management system (HEMS), which allows collecting and storing energy consumption data from appliances and the main load of the home. Two scenarios are designed and implemented: a local HEMS that is isolated from the Internet and relies on an edge device for its processing and storage duties, and a Cloud HEMS that uses AWS IoT Core to manage incoming data messages and provide data-driven services and applications. A testbed was set up in a real house in the city of Valparaiso, Chile, over a one-year period, where energy consumption was collected from four appliances using smart plugs, and the main energy load of the house was collected through a data logger acting as a smart meter. To the best of our knowledge, this is the first electrical energy dataset with a 10-second sampling rate from a real household in Valparaiso, Chile. Results show that both implementations perform the baseline tasks (collecting, storing, and controlling) for a HEMS. This work contributes by providing a detailed technical implementation of a HEMS that enables researchers and engineers to develop and implement HEMS solutions to support different smart home applications.}, } @article {pmid36616737, year = {2022}, author = {Zheng, Y and Luo, J and Chen, W and Zhang, Y and Sun, H and Pan, Z}, title = {Unsupervised 3D Reconstruction with Multi-Measure and High-Resolution Loss.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {1}, pages = {}, pmid = {36616737}, issn = {1424-8220}, support = {62076251//National Natural Science Foundation of China/ ; }, abstract = {Multi-view 3D reconstruction technology based on deep learning is developing rapidly. Unsupervised learning has become a research hotspot because it does not need ground-truth labels. Current unsupervised methods mainly use a 3DCNN to regularize the cost volume and regress image depth, which results in high memory requirements and long computing time. In this paper, we propose an end-to-end unsupervised multi-view 3D reconstruction network framework based on PatchMatch, Unsup_patchmatchnet.
It dramatically reduces memory requirements and computing time. We propose a feature-point consistency loss function and incorporate various self-supervised signals, such as photometric consistency loss and semantic consistency loss, into the loss function. At the same time, we propose a high-resolution loss method, which improves the reconstruction of high-resolution images. The experiments show that the memory usage of the network is reduced by 80% and the running time by more than 50% compared with networks using the 3DCNN method. The overall error of the reconstructed 3D point cloud is only 0.501 mm, which is superior to most current unsupervised multi-view 3D reconstruction networks. We then test on different data sets and verify that the network generalizes well.}, } @article {pmid36616717, year = {2022}, author = {Passian, A and Buchs, G and Seck, CM and Marino, AM and Peters, NA}, title = {The Concept of a Quantum Edge Simulator: Edge Computing and Sensing in the Quantum Era.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {1}, pages = {}, pmid = {36616717}, issn = {1424-8220}, abstract = {Sensors, enabling observations across vast spatial, spectral, and temporal scales, are major data generators for information technology (IT). Processing, storing, and communicating this ever-growing amount of data pose challenges for the current IT infrastructure. Edge computing, an emerging paradigm to overcome the shortcomings of cloud-based computing, could address these challenges. Furthermore, emerging technologies such as quantum computing, quantum sensing, and quantum communications have the potential to fill the performance gaps left by their classical counterparts. Here, we present the concept of an edge quantum computing (EQC) simulator, a platform for designing the next generation of edge computing applications. An EQC simulator is envisioned to integrate elements from both quantum technologies and edge computing to allow studies of quantum edge applications. The presented concept is motivated by the increasing demand for more sensitive and precise sensors that can operate faster at lower power consumption, generating both larger and denser datasets. These demands may be fulfilled with edge quantum sensor networks. Envisioning the EQC era, we present our view on how such a scenario may be amenable to quantification and design. Given the cost and complexity of quantum systems, constructing physical prototypes to explore design and optimization spaces is not sustainable, necessitating EQC infrastructure and component simulators to aid in co-design. We discuss what such a simulator may entail and possible use cases that invoke quantum computing at the edge integrated with new sensor infrastructures.}, } @article {pmid36610429, year = {2023}, author = {Krumm, N}, title = {Organizational and Technical Security Considerations for Laboratory Cloud Computing.}, journal = {The journal of applied laboratory medicine}, volume = {8}, number = {1}, pages = {180-193}, doi = {10.1093/jalm/jfac118}, pmid = {36610429}, issn = {2576-9456}, mesh = {Humans ; *Cloud Computing ; Reproducibility of Results ; *Privacy ; Delivery of Health Care ; }, abstract = {BACKGROUND: Clinical and anatomical pathology services are increasingly utilizing cloud information technology (IT) solutions to meet growing requirements for storage, computation, and other IT services.
Cloud IT solutions are often adopted on the promise of low cost of entry, durability and reliability, scalability, and features that are typically out of reach for small- or mid-sized IT organizations. However, the use of cloud-based IT infrastructure also brings additional security and privacy risks to organizations, as unfamiliarity, public networks, and complex feature sets contribute to an increased attack surface.

CONTENT: In this best-practices guide, we aim to help both managers and IT professionals in healthcare environments understand the requirements and risks of using cloud-based IT infrastructure within the laboratory environment. We describe how technical, operational, and organizational best practices can help mitigate security, privacy, and other risks associated with the use of cloud infrastructure; furthermore, we identify how these best practices fit into healthcare regulatory frameworks. Among organizational best practices, we identify the need for specific hiring requirements, relationships with parent IT groups, mechanisms for reviewing and auditing security practices, and sound practices for onboarding and offboarding employees. Then, we highlight selected specific operational security, account security, and auditing/logging best practices. Finally, we describe how individual cloud technologies have specific resource-level security features.

SUMMARY: We emphasize that laboratory directors, managers, and IT professionals must ensure that the fundamental organizational and process-based requirements are addressed first, to establish the groundwork for technical security solutions and successful implementation of cloud infrastructure.}, } @article {pmid36590844, year = {2022}, author = {Gudla, SPK and Bhoi, SK and Nayak, SR and Singh, KK and Verma, A and Izonin, I}, title = {A Deep Intelligent Attack Detection Framework for Fog-Based IoT Systems.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {6967938}, pmid = {36590844}, issn = {1687-5273}, mesh = {Humans ; *Benchmarking ; *Communication ; Computer Simulation ; Data Collection ; Intelligence ; }, abstract = {Fog computing provides a multitude of services for end-based IoT systems. End IoT devices exchange information with fog nodes and the cloud to handle client tasks. During data collection between the fog layer and the cloud, there is a heightened risk of critical attacks, such as DDoS and many other security attacks, launched through compromised IoT end devices. These network (NW) threats must be spotted early. Deep learning (DL) plays a prominent role in predicting end-client behavior by extracting features and classifying the adversary in the network, yet because of the constrained computation and storage of IoT devices, DL cannot be run on them directly. Here, a framework for fog-based attack detection is proposed, and different attacks are predicted using long short-term memory (LSTM). The behavior of end IoT devices can be predicted by installing a trained LSTMDL model at the fog node computation module.
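A minimal Keras sketch of an LSTM binary classifier of the kind described is shown below; the window length, feature count, and layer sizes are hypothetical stand-ins, not the paper's exact LSTMDL architecture.

```python
# Minimal Keras sketch of an LSTM binary classifier for flow windows,
# in the spirit of the LSTMDL model described above. Window length (20),
# feature count (10), and layer sizes are hypothetical stand-ins.

import numpy as np
import tensorflow as tf

TIMESTEPS, FEATURES = 20, 10

model = tf.keras.Sequential([
    tf.keras.layers.Input(shape=(TIMESTEPS, FEATURES)),
    tf.keras.layers.LSTM(64),
    tf.keras.layers.Dense(1, activation="sigmoid"),   # attack vs. benign
])
model.compile(optimizer="adam", loss="binary_crossentropy",
              metrics=["accuracy", tf.keras.metrics.AUC(name="roc_auc")])

# Synthetic stand-in data; in practice windows are built from datasets
# such as NSL-KDD or UNSW-NB15.
X = np.random.rand(256, TIMESTEPS, FEATURES).astype("float32")
y = np.random.randint(0, 2, size=(256, 1))

model.fit(X, y, epochs=2, batch_size=32, verbose=0)
print(model.evaluate(X, y, verbose=0))   # [loss, accuracy, roc_auc]
```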
The simulations are performed using Python by comparing the LSTMDL model with a deep neural multilayer perceptron (DNMLP), bidirectional LSTM (Bi-LSTM), gated recurrent units (GRU), a hybrid ensemble model (HEM), and a hybrid deep learning model (CNN + LSTM) comprising a convolutional neural network (CNN) and LSTM, on the DDoS-SDN (Mendeley Dataset), NSLKDD, UNSW-NB15, and IoTID20 datasets. To evaluate the performance of the binary classifier, metrics such as accuracy, precision, recall, f1-score, and ROC-AUC curves are considered on these datasets. The LSTMDL model performs best in binary classification, with accuracies of 99.70%, 99.12%, 94.11%, and 99.88% on the respective datasets. The network simulation further shows how the different DL models compare in fog-layer communication behaviour detection time (CBDT). DNMLP detects communication behaviour (CB) faster than the other models, but LSTMDL predicts attacks better.}, } @article {pmid36590152, year = {2022}, author = {Farhadi, F and Barnes, MR and Sugito, HR and Sin, JM and Henderson, ER and Levy, JJ}, title = {Applications of artificial intelligence in orthopaedic surgery.}, journal = {Frontiers in medical technology}, volume = {4}, number = {}, pages = {995526}, pmid = {36590152}, issn = {2673-3129}, support = {K23 EB026507/EB/NIBIB NIH HHS/United States ; }, abstract = {The practice of medicine is rapidly transforming as a result of technological breakthroughs. Artificial intelligence (AI) systems are becoming more and more relevant in medicine and orthopaedic surgery as a result of the nearly exponential growth in computer processing power, cloud-based computing, and the development and refinement of medical-task-specific software algorithms. Because of the extensive role of technologies such as medical imaging that bring high sensitivity, specificity, and positive/negative prognostic value to the management of orthopaedic disorders, the field is particularly ripe for the application of machine-based integration of imaging studies, among other applications. Through this review, we seek to promote awareness in the orthopaedics community of the current accomplishments and projected uses of AI and ML as described in the literature. We summarize the current state of the art in the use of ML and AI in five key orthopaedic disciplines: joint reconstruction, spine, orthopaedic oncology, trauma, and sports medicine.}, } @article {pmid36589280, year = {2023}, author = {Panja, S and Chattopadhyay, AK and Nag, A and Singh, JP}, title = {Fuzzy-logic-based IoMT framework for COVID19 patient monitoring.}, journal = {Computers & industrial engineering}, volume = {176}, number = {}, pages = {108941}, pmid = {36589280}, issn = {1879-0550}, abstract = {Smart healthcare is an integral part of a smart city, which provides real-time and intelligent remote monitoring and tracking services to patients and elderly persons. In the era of an extraordinary public health crisis due to the spread of the novel coronavirus (2019-nCoV), which caused the deaths of millions and affected a multitude of people worldwide in different ways, the role of smart healthcare has become indispensable. Any modern method that allows for speedy and efficient monitoring of COVID19-affected patients could be highly beneficial to medical staff.
Several smart-healthcare systems based on the Internet of Medical Things (IoMT) have attracted worldwide interest for their growing technical assistance in health services, notably in predicting, identifying, preventing, and remotely monitoring most infectious diseases. In this paper, a real-time health monitoring system for COVID19 patients based on edge computing and a fuzzy logic technique is proposed. The proposed model uses the IoMT architecture to collect real-time biological data (or health information) from the patients, monitors and analyzes the health conditions of the infected patients, and generates alert messages that are transmitted to the concerned parties, such as relatives, medical staff, and doctors, to provide appropriate treatment in a timely fashion. The health data are collected through sensors attached to the patients and transmitted to the edge devices and cloud storage for further processing. The collected data are analyzed through fuzzy logic in the edge devices to efficiently identify the risk status (low risk, moderate risk, or high risk) of the COVID19 patients in real time. The proposed system is also paired with a mobile app that enables continuous monitoring of the patients' health status. Moreover, once alerted by the system about the high-risk status of a patient, a doctor can fetch all the health records of the patient for a specified period, which can be utilized for a detailed clinical diagnosis.}, } @article {pmid36588663, year = {2023}, author = {Gezimati, M and Singh, G}, title = {Advances in terahertz technology for cancer detection applications.}, journal = {Optical and quantum electronics}, volume = {55}, number = {2}, pages = {151}, pmid = {36588663}, issn = {0306-8919}, abstract = {Currently, there is an increasing demand for diagnostic techniques that provide functional and morphological information with early cancer detection capability. Novel modern medical imaging systems, driven by recent advancements in technology such as terahertz (THz) and infrared radiation-based imaging technologies that are complementary to conventional modalities, are being developed, investigated, and validated. THz cancer imaging techniques offer novel opportunities for label-free, non-ionizing, non-invasive, and early cancer detection. The image contrast observed in THz cancer imaging studies has mostly been attributed to the higher refractive index, absorption coefficient, and dielectric properties of cancer tissue relative to normal tissue, due to the local increase of water molecule content and the increased blood supply in the cancer-affected tissue. Additional image contrast parameters and cancer biomarkers that have been reported to contribute to THz image contrast include cell structural changes, molecular density, interactions between agents (e.g., contrast agents and embedding agents) and biological tissue, and tissue substances such as proteins, fiber, and fat. In this paper, we present a systematic and comprehensive review of the advancements in the technological development of THz technology for cancer imaging applications. Initially, the fundamental principles and techniques for THz radiation generation and detection, imaging, and spectroscopy are introduced. Further, the application of THz imaging for the detection of various cancer tissues is presented, with particular focus on the in vivo imaging of skin cancer. The data processing techniques for THz data are briefly discussed.
Also, we identify the advantages and existing challenges in THz-based cancer detection and report performance improvement techniques. Recent advancements toward optimized and miniaturized THz systems are also reported. Finally, the integration of THz systems with artificial intelligence (AI), the internet of things (IoT), cloud computing, big data analytics, robotics, and related technologies into more sophisticated systems is proposed. This will facilitate large-scale clinical applications of THz for smart and connected next-generation healthcare systems and provide a roadmap for future research.}, } @article {pmid36584089, year = {2022}, author = {Yang, D and Yu, J and Du, X and He, Z and Li, P}, title = {Energy saving strategy of cloud data computing based on convolutional neural network and policy gradient algorithm.}, journal = {PloS one}, volume = {17}, number = {12}, pages = {e0279649}, pmid = {36584089}, issn = {1932-6203}, mesh = {*Algorithms ; *Neural Networks, Computer ; Computer Simulation ; Cloud Computing ; Physical Phenomena ; }, abstract = {Cloud Data Computing (CDC) is conducive to precise energy-saving management of user data centers based on real-time energy consumption monitoring of Information Technology equipment. This work aims to obtain the most suitable energy-saving strategies to achieve safe, intelligent, and visualized energy management. First, the theory of the Convolutional Neural Network (CNN) is discussed, and an intelligent energy-saving model based on CNN is designed to manage the variable energy consumption, load, and power consumption of the CDC data center. Then, the core idea of the policy gradient (PG) algorithm is introduced, and a CDC task scheduling model is designed based on the PG algorithm to address the uncertainty and volatility of CDC scheduling tasks. Finally, the performance of different neural network models in the training process is analyzed from the perspective of total energy consumption and load optimization of the CDC center, and the PG-based CDC task scheduling model is simulated to analyze the task scheduling demand. The results demonstrate that the energy consumption of the CNN algorithm in the CDC energy-saving model is better than that of the Elman and ecoCloud algorithms. Moreover, the CNN algorithm reduces the number of virtual machine migrations in the CDC energy-saving model by 9.30% compared with the Elman algorithm. The Deep Deterministic Policy Gradient (DDPG) algorithm performs best in task scheduling of the cloud data center, with an average response time of 141. In contrast, the Deep Q Network algorithm performs poorly.
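For readers unfamiliar with the policy-gradient idea behind such scheduling models, here is a toy REINFORCE sketch that learns to send tasks to the less-loaded of two servers (pure NumPy; the state encoding and reward shaping are illustrative assumptions, not the paper's model):

    # Toy REINFORCE policy gradient for a two-server task-scheduling choice.
    # Illustrative only: state, reward, and policy are simplified assumptions.
    import numpy as np

    rng = np.random.default_rng(0)
    theta = np.zeros((2, 2))  # logit weights: actions x state features
    alpha = 0.1               # learning rate

    def policy(state):
        logits = theta @ state
        p = np.exp(logits - logits.max())
        return p / p.sum()

    for episode in range(500):
        loads = rng.uniform(0, 1, size=2)             # current server loads
        state = np.array([loads[0] - loads[1], 1.0])  # load gap + bias term
        p = policy(state)
        action = rng.choice(2, p=p)                   # pick a server
        reward = -loads[action]                       # lower load, higher reward
        # REINFORCE: grad of log pi(a|s) for a softmax-linear policy.
        grad = (np.eye(2)[action] - p)[:, None] * state[None, :]
        theta += alpha * reward * grad

    # After training, the policy should favor the less-loaded server.
    print(policy(np.array([+0.5, 1.0])))  # server 0 busier: prefers server 1
    print(policy(np.array([-0.5, 1.0])))  # server 1 busier: prefers server 0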
This work demonstrates that Deep Reinforcement Learning (DRL) and neural networks can reduce the energy consumption of CDC and improve the completion time of CDC tasks, offering a research reference for CDC resource scheduling.}, } @article {pmid36575310, year = {2022}, author = {Wang, J and Li, X and Wang, X and Zhou, S and Luo, Y}, title = {Farmland quality assessment using deep fully convolutional neural networks.}, journal = {Environmental monitoring and assessment}, volume = {195}, number = {1}, pages = {239}, pmid = {36575310}, issn = {1573-2959}, support = {21KJB170010//Natural Science Foundation of the Higher Education Institutions of Jiangsu Province, China/ ; 42201282//Young Scientists Fund of the National Natural Science Foundation of China/ ; 42271271//National Natural Science Foundation of China/ ; }, mesh = {*Environmental Monitoring ; *Farms ; Image Processing, Computer-Assisted/methods ; Machine Learning ; *Neural Networks, Computer ; *Agriculture/methods ; }, abstract = {Farmland is the cornerstone of agriculture and is important for food security and social production. Farmland assessment is essential, but traditional methods are usually expensive and slow. Deep learning methods have recently been developed and widely applied in image recognition, semantic understanding, and many other application domains. In this research, we used fully convolutional networks (FCN) as the deep learning model to evaluate farmland grades. The normalized difference vegetation index (NDVI) derived from Landsat images was used as the input data, and the China National Cultivated Land Grade Database within Jiangsu Province was used to train the model on cloud computing. We also applied an image segmentation method to improve the original FCN results and compared the results with classical machine learning (ML) methods. Our research found that the FCN can predict farmland grades with an overall F1 score (the harmonic mean of precision and recall) of 0.719, and F1 scores of 0.909, 0.590, 0.740, 0.642, and 0.023 for non-farmland and level I, II, III, and IV farmland, respectively. Combining the FCN with the image segmentation method can further improve prediction accuracy, yielding fewer noise pixels and more realistic edges. Compared with conventional ML, at least in farmland evaluation, the FCN provides better results with higher precision, recall, and F1 score. Our research indicates that, by using remote sensing NDVI data, the deep learning method can provide acceptable farmland assessment without fieldwork and can be used as a novel supplement to traditional methods. The method used in this research will save substantial time and cost compared with traditional means.}, } @article {pmid36575255, year = {2023}, author = {Niyazi, M and Behnamian, J}, title = {Application of cloud computing and big data in three-stage dynamic modeling of disaster relief logistics and wounded transportation: a case study.}, journal = {Environmental science and pollution research international}, volume = {30}, number = {13}, pages = {38121-38140}, doi = {10.1007/s11356-022-24770-3}, pmid = {36575255}, issn = {1614-7499}, mesh = {Humans ; Cloud Computing ; Big Data ; *Disaster Planning ; *Disasters ; *Earthquakes ; }, abstract = {Collecting and sharing information about affected areas is an important activity for optimal decision-making in relief processes.
Failures such as over-sending some items to affected areas and mistakes in transferring injured people to medical centers stem from improper management of this information. Because cloud computing, as a processing and storage platform for big data, is independent of device and location and can perform high-speed processing, its use in disasters has drawn considerable attention from researchers. In this environment, a three-stage dynamic procedure for evacuation operations and logistics issues is presented. The first stage of the proposed model is image processing and tweet mining in a cloud center in order to determine the disaster parameters. In stage II, a mixed-integer multi-commodity model is presented for relief commodity delivery, wounded people transportation with capacity constraints, and the locating of possible on-site clinics and local distribution centers near disaster areas. In stage III, using a system of equations, detailed vehicle load/unload instructions are obtained. Finally, the effectiveness of the proposed model is investigated on data from an earthquake disaster in Iran. Comparing the proposed approach with a two-stage algorithm shows that the total unsatisfied demand across all commodity types was lower in the proposed approach. Also, the number of survivors in the three-stage model is significantly higher than in the two-stage one. The better performance of the proposed algorithm is due to the fact that online data are continuously available and that decisions such as sending relief items and dispatching are made more effectively.}, } @article {pmid36572709, year = {2022}, author = {Khan, S and Khan, HU and Nazir, S}, title = {Systematic analysis of healthcare big data analytics for efficient care and disease diagnosing.}, journal = {Scientific reports}, volume = {12}, number = {1}, pages = {22377}, pmid = {36572709}, issn = {2045-2322}, mesh = {*Data Science ; *Delivery of Health Care ; Big Data ; Information Systems ; Machine Learning ; }, abstract = {Big data has revolutionized the world by providing tremendous opportunities for a variety of applications. It contains a gigantic amount of data and, in particular, a plethora of data types that have been significantly useful in diverse research domains. In the healthcare domain, researchers use computational devices to extract enriched relevant information from this data and develop smart applications to solve real-life problems in a timely fashion. Electronic health (eHealth) and mobile health (mHealth) facilities, along with the availability of new computational models, have enabled doctors and researchers to extract relevant information and visualize healthcare big data in a new spectrum. The digital transformation of healthcare systems through information systems, medical technology, and handheld and smart wearable devices has posed many challenges to researchers and caretakers in the form of storage, treatment cost, and processing time (to extract enriched information and minimize error rates so as to make optimum decisions). In this research work, the existing literature is analysed and assessed to identify gaps that affect the overall performance of available healthcare applications, and enhanced solutions to address these gaps are suggested.
In this comprehensive systematic research work, the existing literature reported from 2011 to 2021 is thoroughly analysed to identify the efforts made to facilitate doctors and practitioners in diagnosing diseases using healthcare big data analytics. A set of research questions is formulated to analyse the relevant articles, identify key features and optimum management solutions, and then use these analyses to achieve effective outcomes. The results of this systematic mapping conclude that, despite the efforts made in healthcare big data analytics, newer hybrid machine learning and cloud computing-based models should be adopted to reduce treatment cost and simulation time and to achieve improved quality of care. This systematic mapping will also enhance the capabilities of doctors, practitioners, researchers, and policymakers to use this study as evidence for future research.}, } @article {pmid36570052, year = {2022}, author = {Zahid, MA and Akhtar, A and Shafiq, B and Shamail, S and Afzal, A and Vaidya, J}, title = {An Integrated Framework for Fault Resolution in Business Processes.}, journal = {IEEE International Conference on Web Services : proceedings. IEEE International Conference on Web Services}, volume = {2022}, number = {}, pages = {266-275}, pmid = {36570052}, issn = {2770-8144}, support = {R01 GM118574/GM/NIGMS NIH HHS/United States ; R35 GM134927/GM/NIGMS NIH HHS/United States ; }, abstract = {Cloud and edge-computing based platforms have enabled rapid development of distributed business process (BP) applications in a plug and play manner. However, these platforms do not provide the capabilities needed for identifying or repairing faults in BPs. Faults in BPs may occur due to errors made by BP designers because of their lack of understanding of the underlying component services, misconfiguration of these services, or incorrect/incomplete BP workflow specifications. Such faults may not be discovered at the design or development stage and may occur at runtime. In this paper, we present a unified framework for automated fault resolution in BPs. The proposed framework employs a novel and efficient fault resolution approach that extends the generate-and-validate program repair approach. In addition, we propose a hybrid approach that performs fault resolution by analyzing a faulty BP in isolation as well as by comparison with other BPs using similar services. This hybrid approach results in improved accuracy and broader coverage of fault types. We also perform an extensive experimental evaluation of the effectiveness of the proposed approach using a dataset of 208 faulty BPs.}, } @article {pmid36569183, year = {2022}, author = {Mawgoud, AA and Taha, MHN and Abu-Talleb, A and Kotb, A}, title = {A deep learning based steganography integration framework for ad-hoc cloud computing data security augmentation using the V-BOINC system.}, journal = {Journal of cloud computing (Heidelberg, Germany)}, volume = {11}, number = {1}, pages = {97}, pmid = {36569183}, issn = {2192-113X}, abstract = {In the early days of digital transformation, the automation, scalability, and availability of cloud computing made a big difference for business.
Nonetheless, significant concerns have been raised about the security and privacy levels that cloud systems can provide, as enterprises have accelerated their cloud migration journeys in an effort to provide a remote working environment for their employees, primarily in light of the COVID-19 outbreak. The goal of this study is to improve steganography in ad hoc cloud systems using deep learning. The implementation is separated into two phases. In Phase 1, the "Ad-hoc Cloud System" concept and deployment plan were developed with the help of V-BOINC. In Phase 2, a modified form of steganography combined with deep learning was used to study the security of data transmission in ad-hoc cloud networks. In the majority of prior studies, attempts to employ deep learning models to augment or replace data-hiding systems did not achieve a high success rate. The implemented model embeds data images within colored cover images in the developed ad hoc cloud system. The systematic steganography model conceals messages with lower statistical detection rates, and small images can be embedded beneath large cover images when necessary. The implemented ad-hoc system outperformed Amazon EC2 in terms of performance, while the proposed deep steganography approach scored highly for concealing both data and images when evaluated against several attacks in an ad-hoc cloud system environment.}, } @article {pmid36567676, year = {2022}, author = {Barot, V and Patel, DR}, title = {A physiological signal compression approach using optimized Spindle Convolutional Auto-encoder in mHealth applications.}, journal = {Biomedical signal processing and control}, volume = {73}, number = {}, pages = {103436}, pmid = {36567676}, issn = {1746-8094}, abstract = {BACKGROUND AND OBJECTIVES: The COVID-19 pandemic manifested the need for robust digital platforms that facilitate healthcare services such as consultancy, clinical therapies, real-time remote monitoring, early diagnosis, and future predictions. Innovations based on technologies such as the Internet of Things (IoT), edge computing, cloud computing, and artificial intelligence are helping to address this crisis. The need for remote monitoring, symptom analysis, and early detection of diseases has led to a tremendous increase in the deployment of wearable sensor devices. They facilitate seamless gathering of physiological data such as electrocardiogram (ECG) signals, respiration traces (RESP), galvanic skin response (GSR), pulse rate, body temperature, photoplethysmograms (PPG), and oxygen saturation (SpO2). For diagnosis and analysis, the gathered data need to be stored. Wearable devices operate on batteries and have a memory constraint, so in mHealth application architectures this gathered data is stored on cloud-based servers. Transmitting data from wearable devices to cloud servers via edge devices consumes considerable energy. This paper proposes a deep learning based compression model, SCAElite, that reduces the data volume, enabling energy-efficient transmission.
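To make the compression idea concrete, here is a minimal 1-D convolutional autoencoder for fixed-length signal windows (Keras assumed; toy layer sizes and synthetic data, not the published Spindle Convolutional Auto-encoder):

    # Minimal 1-D convolutional autoencoder for signal compression
    # (toy sizes and random data; not the published SCAElite model).
    import numpy as np
    from tensorflow import keras

    WIN = 256  # assumed samples per physiological-signal window

    encoder = keras.Sequential([
        keras.layers.Input(shape=(WIN, 1)),
        keras.layers.Conv1D(16, 5, strides=2, padding="same", activation="relu"),
        keras.layers.Conv1D(4, 5, strides=4, padding="same", activation="relu"),
    ])  # 256 samples -> 32 x 4 latent values (2x compression; tune for more)

    decoder = keras.Sequential([
        keras.layers.Input(shape=(32, 4)),
        keras.layers.Conv1DTranspose(16, 5, strides=4, padding="same",
                                     activation="relu"),
        keras.layers.Conv1DTranspose(1, 5, strides=2, padding="same"),
    ])

    auto = keras.Sequential([encoder, decoder])
    auto.compile(optimizer="adam", loss="mse")

    x = np.random.randn(128, WIN, 1).astype("float32")  # stand-in for ECG windows
    auto.fit(x, x, epochs=2, verbose=0)

    latent = encoder.predict(x, verbose=0)
    print("compression ratio:", x[0].size / latent[0].size)  # 256 / 128 = 2.0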

RESULTS: The Stress Recognition in Automobile Drivers and MIT-BIH datasets from PhysioNet are used to validate algorithm performance. The model achieves a compression ratio of up to 300-fold with reconstruction errors within 8% on the stress recognition dataset, and of 106.34-fold with reconstruction errors within 8% on the MIT-BIH dataset. The computational complexity of SCAElite is 51.65% lower than that of the state-of-the-art deep compressive model.

CONCLUSION: It is experimentally validated that SCAElite guarantees a high compression ratio with good-quality restoration capabilities for physiological signal compression in mHealth applications. It has a compact architecture and is computationally more efficient than the state-of-the-art deep compressive model.}, } @article {pmid36563043, year = {2022}, author = {Tuler de Oliveira, M and Amorim Reis, LH and Marquering, H and Zwinderman, AH and Delgado Olabarriaga, S}, title = {Perceptions of a Secure Cloud-Based Solution for Data Sharing During Acute Stroke Care: Qualitative Interview Study.}, journal = {JMIR formative research}, volume = {6}, number = {12}, pages = {e40061}, pmid = {36563043}, issn = {2561-326X}, abstract = {BACKGROUND: Acute stroke care demands fast procedures performed through the collaboration of multiple professionals across multiple organizations. Cloud computing and the wide adoption of electronic medical records (EMRs) enable health care systems to improve data availability and facilitate sharing among professionals. However, designing a secure and privacy-preserving EMR cloud-based application is challenging because it must dynamically control access to the patient's EMR according to the needs for data during treatment.

OBJECTIVE: We developed a prototype of a secure EMR cloud-based application. The application explores the security features offered by the eHealth cloud-based framework created by the Advanced Secure Cloud Encrypted Platform for Internationally Orchestrated Solutions in Health Care Horizon 2020 project. This study aimed to collect impressions, challenges, and improvements for the prototype when applied to the use case of secure data sharing among acute care teams during emergency treatment in the Netherlands.

METHODS: We conducted 14 semistructured interviews with medical professionals in 4 prominent roles in acute care: emergency call centers, ambulance services, emergency hospitals, and general practitioner clinics. We used in-depth interviews to capture their perspectives about the application's design and functions and its use in a simulated acute care event. We used thematic analysis of interview transcripts. Participants were recruited until the collected data reached thematic saturation.

RESULTS: The participants' perceptions and feedback are presented as 5 themes identified from the interviews: current challenges (theme 1), quality of the shared EMR data (theme 2), integrity and auditability of the EMR data (theme 3), usefulness and functionality of the application (theme 4), and trust and acceptance of the technology (theme 5). The results reinforced the current challenges in patient data sharing during acute stroke care. Moreover, from the user's point of view, we described the challenges of adopting the Advanced Secure Cloud Encrypted Platform for Internationally Orchestrated Solutions in Health Care Acute Stroke Care application in a real scenario and provided suggestions for improving the proposed technology's acceptability.

CONCLUSIONS: This study has endorsed a system that supports data sharing among acute care professionals efficiently, without compromising the security and privacy of the patient. This explorative study identified several significant barriers to, and improvement opportunities for, the future acceptance and adoption of the proposed system. Moreover, the study results highlight that the desired digital transformation should consider integrating already existing systems instead of requiring migration to a new centralized system.}, } @article {pmid36561335, year = {2022}, author = {Sethuraman, A}, title = {Teaching computational genomics and bioinformatics on a high performance computing cluster-a primer.}, journal = {Biology methods & protocols}, volume = {7}, number = {1}, pages = {bpac032}, pmid = {36561335}, issn = {2396-8923}, support = {R15 GM143700/GM/NIGMS NIH HHS/United States ; }, abstract = {The burgeoning field of genomics as applied to personalized medicine, epidemiology, conservation, agriculture, forensics, drug development, and other fields comes with large computational and bioinformatics costs, which are often inaccessible to student trainees in classroom settings at universities. However, with the increased availability of resources such as NSF XSEDE, Google Cloud, Amazon AWS, and other high-performance computing (HPC) clouds and clusters for educational purposes, a growing community of academics is working on teaching the utility of HPC resources in genomics and big data analyses. Here, I describe the successful implementation of a semester-long (16 week) upper division undergraduate/graduate level course in Computational Genomics and Bioinformatics taught at San Diego State University in Spring 2022. Students were trained in the theory, algorithms, and hands-on applications of genomic data quality control, assembly, annotation, multiple sequence alignment, variant calling, phylogenomic analyses, population genomics, genome-wide association studies, and differential gene expression analyses using RNAseq data on their own dedicated 6-CPU NSF XSEDE Jetstream virtual machines. All lesson plans, activities, examinations, tutorials, code, lectures, and notes are publicly available at https://github.com/arunsethuraman/biomi609spring2022.}, } @article {pmid36560272, year = {2022}, author = {Uslu, S and Kaur, D and Durresi, M and Durresi, A}, title = {Trustability for Resilient Internet of Things Services on 5G Multiple Access Edge Cloud Computing.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {24}, pages = {}, pmid = {36560272}, issn = {1424-8220}, support = {1547411//National Science Foundation/ ; 2017-67003-26057//United States Department of Agriculture/ ; }, mesh = {*Internet of Things ; Cloud Computing ; Reproducibility of Results ; Internet ; Trust ; }, abstract = {Billions of Internet of Things (IoT) devices and sensors are expected to be supported by fifth-generation (5G) wireless cellular networks. This highly connected structure is predicted to attract different and previously unseen types of attacks on devices, sensors, and networks, requiring advanced mitigation strategies and active monitoring of system components. Therefore, a paradigm shift is needed, from traditional prevention and detection approaches toward resilience. This study proposes a trust-based defense framework to ensure resilient IoT services on 5G multi-access edge computing (MEC) systems.
This defense framework is based on the trustability metric, which extends the concept of reliability and measures how much a system can be trusted to keep a given level of performance under a specific successful attack vector. Furthermore, trustability is traded off against system cost to measure the net utility of the system. Systems using multiple sensors with different levels of redundancy were tested, and the framework was shown to measure the trustability of the entire system. Furthermore, different types of attacks were simulated on an edge cloud with multiple nodes, and trustability was evaluated against the capabilities of dynamic node addition for redundancy and the removal of untrusted nodes. Finally, the defense framework measured the net utility of the service, comparing two types of edge clouds, with and without the node deactivation capability. Overall, the proposed trustability-based defense framework ensures a satisfactory level of resilience for IoT on 5G MEC systems, traded off against an accepted cost of redundant resources under various attacks.}, } @article {pmid36560073, year = {2022}, author = {El-Nahal, F and Xu, T and AlQahtani, D and Leeson, M}, title = {A Bidirectional Wavelength Division Multiplexed (WDM) Free Space Optical Communication (FSO) System for Deployment in Data Center Networks (DCNs).}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {24}, pages = {}, pmid = {36560073}, issn = {1424-8220}, support = {101008280//European Commission/ ; }, abstract = {Data centers are crucial to the growth of cloud computing. Next-generation data center networks (DCNs) will rely heavily on optical technology. Here, we have investigated a bidirectional wavelength-division-multiplexed (WDM) free space optical communication (FSO) system for deployment in optical wireless DCNs. The system was evaluated for symmetric 10 Gbps 16-quadrature amplitude modulation (16-QAM) intensity-modulated orthogonal frequency-division multiplexing (OFDM) downstream signals and 10 Gbps on-off keying (OOK) upstream signals. The transmission of optical signals over an FSO link is demonstrated using a gamma-gamma channel model. According to the bit error rate (BER) results obtained for each WDM signal, the bidirectional WDM-FSO transmission can achieve 320 Gbps over a 1000 m free space transmission length. The results show that the proposed FSO topology offers an excellent alternative to fiber-based optical interconnects in DCNs, allowing for high data rate bidirectional transmission.}, } @article {pmid36555731, year = {2022}, author = {Puch-Giner, I and Molina, A and Municoy, M and Pérez, C and Guallar, V}, title = {Recent PELE Developments and Applications in Drug Discovery Campaigns.}, journal = {International journal of molecular sciences}, volume = {23}, number = {24}, pages = {}, pmid = {36555731}, issn = {1422-0067}, mesh = {Computer Simulation ; *Software ; *Drug Discovery/methods ; Models, Molecular ; Monte Carlo Method ; Drug Design ; }, abstract = {Computer simulation techniques are gaining a central role in molecular pharmacology. Due to several factors, including the significant improvements in traditional molecular modelling, the rise of machine learning methods, massive data generation, and the availability of virtually unlimited computational resources through cloud computing, the future of pharmacology seems to go hand in hand with in silico predictions.
In this review, we summarize our recent efforts in this direction, centered on the unconventional Monte Carlo PELE software and on its coupling with machine learning techniques. We also provide new data on combining two recent techniques: aquaPELE, capable of exhaustive water sampling, and fragPELE, for fragment growing.}, } @article {pmid36555493, year = {2022}, author = {Nelson, TM and Ghosh, S and Postler, TS}, title = {L-RAPiT: A Cloud-Based Computing Pipeline for the Analysis of Long-Read RNA Sequencing Data.}, journal = {International journal of molecular sciences}, volume = {23}, number = {24}, pages = {}, pmid = {36555493}, issn = {1422-0067}, support = {R21 AI156616/AI/NIAID NIH HHS/United States ; R21AI156616//National Institute of Allergy and Infectious Diseases/ ; }, mesh = {*RNA/genetics ; *Cloud Computing ; Gene Expression Profiling/methods ; Computational Biology/methods ; Software ; Sequence Analysis, RNA ; High-Throughput Nucleotide Sequencing/methods ; }, abstract = {Long-read sequencing (LRS) has been adopted to meet a wide variety of research needs, ranging from the construction of novel transcriptome annotations to the rapid identification of emerging virus variants. Amongst other advantages, LRS preserves more information about RNA at the transcript level than conventional high-throughput sequencing, including far more accurate and quantitative records of splicing patterns. New studies with LRS datasets are being published at an exponential rate, generating a vast reservoir of information that can be leveraged to address a host of different research questions. However, mining such publicly available data in a tailored fashion is currently not easy, as the available software tools typically require familiarity with the command-line interface, which constitutes a significant obstacle to many researchers. Additionally, different research groups utilize different software packages to perform LRS analysis, which often prevents a direct comparison of published results across different studies. To address these challenges, we have developed the Long-Read Analysis Pipeline for Transcriptomics (L-RAPiT), a user-friendly, free pipeline requiring no dedicated computational resources or bioinformatics expertise. L-RAPiT can be implemented directly through Google Colaboratory, a system based on the open-source Jupyter notebook environment, and allows for the direct analysis of transcriptomic reads from Oxford Nanopore and PacBio LRS machines. This new pipeline enables the rapid, convenient, and standardized analysis of publicly available or newly generated LRS datasets.}, } @article {pmid36554175, year = {2022}, author = {Liu, C and Jiao, J and Li, W and Wang, J and Zhang, J}, title = {Tr-Predictior: An Ensemble Transfer Learning Model for Small-Sample Cloud Workload Prediction.}, journal = {Entropy (Basel, Switzerland)}, volume = {24}, number = {12}, pages = {}, pmid = {36554175}, issn = {1099-4300}, support = {61902112//National Natural Science Foundation of China 404 under Grant/ ; 23A520036//Application Research Plan of Key Scientific Research Projects in 405 Henan University/ ; GCIS202115//Guangxi Key Laboratory of Cryptography and Information 406 Security/ ; }, abstract = {Accurate workload prediction plays a key role in intelligent scheduling decisions on cloud platforms. Cloud platforms hold massive numbers of short workload sequences, and the small amount of data per sequence and the presence of outliers make accurate workload sequence prediction a challenge.
To address these issues, this paper proposes an ensemble learning method based on sample weight transfer and long short-term memory (LSTM), termed Tr-Predictor. Specifically, a similar-sequence selection method combining time warp edit distance (TWED) and transfer entropy (TE) is proposed to select a source-domain dataset with higher similarity to the target workload sequence. Then, we upgrade the base learner of the two-stage TrAdaBoost.R2 ensemble model to a deep LSTM, enhancing the ensemble model's ability to extract sequence features. To optimize the weight adjustment strategy, we adopt a two-stage scheme and select the best weight for the learner according to the sample error and model error. Finally, the above process determines the parameters of the target model, which is then used to predict the short-task sequences. In the experimental validation, we arbitrarily select nine sets of short-workload data from the Google dataset and three sets from the Alibaba cluster to verify the prediction effectiveness of the proposed algorithm. The experimental results show that, compared with commonly used cloud workload prediction methods, Tr-Predictor achieves higher prediction accuracy on small-sample workloads. The prediction indicators of the ablation experiments show the performance gain of each part of the proposed method.}, } @article {pmid36550311, year = {2023}, author = {Pietris, J and Bacchi, S and Tan, Y and Kovoor, J and Gupta, A and Chan, W}, title = {Safety always: the challenges of cloud computing in medical practice and ophthalmology.}, journal = {Eye (London, England)}, volume = {37}, number = {12}, pages = {2436-2437}, pmid = {36550311}, issn = {1476-5454}, mesh = {Humans ; *Cloud Computing ; *Ophthalmology ; Software ; }, } @article {pmid36547491, year = {2022}, author = {Martin, J and Cantero, D and González, M and Cabrera, A and Larrañaga, M and Maltezos, E and Lioupis, P and Kosyvas, D and Karagiannidis, L and Ouzounoglou, E and Amditis, A}, title = {Embedded Vision Intelligence for the Safety of Smart Cities.}, journal = {Journal of imaging}, volume = {8}, number = {12}, pages = {}, pmid = {36547491}, issn = {2313-433X}, support = {883522//European Commission/ ; }, abstract = {Advances in artificial intelligence (AI) and embedded systems have resulted in a recent increase in the use of image processing applications for smart cities' safety. This enables a cost-adequate scale of automated video surveillance, increasing the data available and reducing the need for human intervention. At the same time, although deep learning is very intensive in terms of computing resources, hardware and software improvements have emerged that allow embedded systems to implement sophisticated machine learning algorithms at the edge. Additionally, new lightweight open-source middleware for constrained-resource devices, such as EdgeX Foundry, has appeared to facilitate the collection and processing of data at the sensor level, with communication capabilities for exchanging data with a cloud enterprise application. The objective of this work is to present and describe the development of two Edge Smart Camera Systems for the safety of smart cities within the S4AllCities H2020 project.
Hence, the work presents the hardware and software modules developed within the project, including a custom hardware platform specifically developed for the deployment of deep learning models based on the I.MX8 Plus from NXP, which considerably reduces processing and inference times; a custom Video Analytics Edge Computing (VAEC) system deployed on a commercial NVIDIA Jetson TX2 platform, which provides strong results in person detection; and an edge computing framework for the management of those two edge devices, the Distributed Edge Computing framework DECIoT. To verify the utility and functionality of the systems, extensive experiments were performed. The results highlight their potential to provide enhanced situational awareness and demonstrate their suitability for edge machine vision applications for safety in smart cities.}, } @article {pmid36547481, year = {2022}, author = {Saad El Imanni, H and El Harti, A and Hssaisoune, M and Velastegui-Montoya, A and Elbouzidi, A and Addi, M and El Iysaouy, L and El Hachimi, J}, title = {Rapid and Automated Approach for Early Crop Mapping Using Sentinel-1 and Sentinel-2 on Google Earth Engine; A Case of a Highly Heterogeneous and Fragmented Agricultural Region.}, journal = {Journal of imaging}, volume = {8}, number = {12}, pages = {}, pmid = {36547481}, issn = {2313-433X}, abstract = {Accurate and rapid crop type mapping is critical for agricultural sustainability. The growing trend of cloud-based geospatial platforms provides rapid processing tools and cloud storage for remote sensing data. In particular, a variety of remote sensing applications have made use of publicly accessible data from the Sentinel missions of the European Space Agency (ESA). However, few studies have employed these data to evaluate the effectiveness of Sentinel-1 and Sentinel-2 spectral bands and Machine Learning (ML) techniques in challenging, highly heterogeneous and fragmented agricultural landscapes using the Google Earth Engine (GEE) cloud computing platform. This work aims to map crop types accurately and early in a highly heterogeneous and fragmented agricultural region of the Tadla Irrigated Perimeter (TIP) as a case study, using the high spatiotemporal resolution of Sentinel-1, Sentinel-2, and a Random Forest (RF) classifier implemented on GEE. More specifically, five experiments were performed to assess the effect of optical band reflectance values, vegetation indices, and SAR backscattering coefficients on the accuracy of crop classification. Besides, two scenarios were used to assess the effect of monthly temporal windows on classification accuracy. The findings of this study show that the fusion of Sentinel-1 and Sentinel-2 data can accurately produce early crop maps of the studied area, with an Overall Accuracy (OA) reaching 95.02%. The scenarios show that monthly time series perform better in terms of classification accuracy than single-month window images. Red-edge and shortwave infrared bands can improve the accuracy of crop classification by 1.72% compared with using only traditional bands (i.e., visible and near-infrared bands). The inclusion of two common vegetation indices (the Normalized Difference Vegetation Index (NDVI) and the Enhanced Vegetation Index (EVI)) and Sentinel-1 backscattering coefficients in the crop classification enhanced the overall classification accuracy by 0.02% and 2.94%, respectively, compared with using the Sentinel-2 reflectance bands alone.
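As a pointer to the general workflow this entry describes, the following sketch shows the usual GEE pattern for training a random-forest classifier on a Sentinel-2 composite (earthengine-api and prior authentication assumed; the area of interest, asset path, and 'crop' label property are placeholders, not the paper's data):

    # Typical GEE random-forest crop-classification pattern (sketch).
    # The AOI, asset path, and label property below are placeholders.
    import ee
    ee.Initialize()  # assumes ee.Authenticate() / project setup done earlier

    aoi = ee.Geometry.Rectangle([-6.6, 32.2, -6.2, 32.6])  # placeholder AOI
    bands = ['B2', 'B3', 'B4', 'B8', 'B11', 'B12']         # visible/NIR/SWIR

    composite = (ee.ImageCollection('COPERNICUS/S2_SR')
                 .filterBounds(aoi)
                 .filterDate('2021-01-01', '2021-06-30')
                 .filter(ee.Filter.lt('CLOUDY_PIXEL_PERCENTAGE', 10))
                 .median()
                 .select(bands))

    # A FeatureCollection of labeled field samples (placeholder asset).
    training_points = ee.FeatureCollection('users/example/tip_crop_samples')
    samples = composite.sampleRegions(collection=training_points,
                                      properties=['crop'], scale=10)

    classifier = ee.Classifier.smileRandomForest(100).train(
        features=samples, classProperty='crop', inputProperties=bands)

    classified = composite.classify(classifier)
    print(classified.bandNames().getInfo())  # ['classification']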
The monthly-window analysis indicated that the improvement in crop classification accuracy is greatest when the March images are available, with an OA higher than 80%.}, } @article {pmid36544470, year = {2023}, author = {Bang, I and Khanh Nong, L and Young Park, J and Thi Le, H and Mok Lee, S and Kim, D}, title = {ChEAP: ChIP-exo analysis pipeline and the investigation of Escherichia coli RpoN protein-DNA interactions.}, journal = {Computational and structural biotechnology journal}, volume = {21}, number = {}, pages = {99-104}, pmid = {36544470}, issn = {2001-0370}, abstract = {Genome-scale studies of bacterial regulatory networks have benefited from declining sequencing costs and advances in ChIP (chromatin immunoprecipitation) methods. Among these, ChIP-exo has proven competent with its near single-base-pair resolution. While several algorithms and programs have been developed for different analytical steps in ChIP-exo data processing, there has been little effort to incorporate them into a convenient bioinformatics pipeline that is intuitive and publicly available. In this paper, we developed the ChIP-exo Analysis Pipeline (ChEAP), which executes the entire process in one step, from trimming and aligning raw sequencing reads to visualizing ChIP-exo results. The pipeline is implemented in the interactive web-based Python development environment Jupyter Notebook, which is compatible with the Google Colab cloud platform to facilitate code sharing and collaboration among researchers. Additionally, users can exploit the free GPU and CPU resources allocated by Colab to carry out computing tasks regardless of the performance of their local machines. The utility of ChEAP was demonstrated with ChIP-exo datasets of the RpoN sigma factor in E. coli K-12 MG1655. For two raw data files, the ChEAP runtime was 2 min 25 s. Subsequent analyses identified 113 RpoN binding sites showing a conserved RpoN binding pattern in the motif search. The application of ChEAP to ChIP-exo data analysis is extensive and flexible, supporting the parallel processing of data from various organisms.}, } @article {pmid36541007, year = {2023}, author = {Holko, M and Weber, N and Lunt, C and Brenner, SE}, title = {Biomedical research in the Cloud: considerations for researchers and organizations moving to (or adding) cloud computing resources.}, journal = {Pacific Symposium on Biocomputing. Pacific Symposium on Biocomputing}, volume = {28}, number = {}, pages = {536-540}, pmid = {36541007}, issn = {2335-6936}, mesh = {Humans ; *Computational Biology ; Cloud Computing ; Reproducibility of Results ; *Biomedical Research ; Information Dissemination ; }, abstract = {As biomedical research data grow, researchers need reliable and scalable solutions for storage and compute. There is also a need to build systems that encourage and support collaboration and data sharing, resulting in greater reproducibility. This has led many researchers and organizations to use cloud computing [1]. The cloud not only enables scalable, on-demand resources for storage and compute, but also collaboration and continuity during virtual work, and can provide superior security and compliance features. Moving to or adding cloud resources, however, is not trivial or without cost, and may not be the best choice in every scenario.
The goal of this workshop is to explore the benefits of using the cloud in biomedical and computational research, along with the considerations (pros and cons) for a range of scenarios, including individual researchers, collaborative research teams, consortium research programs, and large biomedical research agencies/organizations.}, } @article {pmid36537002, year = {2023}, author = {Crowley, MA and Stockdale, CA and Johnston, JM and Wulder, MA and Liu, T and McCarty, JL and Rieb, JT and Cardille, JA and White, JC}, title = {Towards a whole-system framework for wildfire monitoring using Earth observations.}, journal = {Global change biology}, volume = {29}, number = {6}, pages = {1423-1436}, doi = {10.1111/gcb.16567}, pmid = {36537002}, issn = {1365-2486}, support = {CGSD2-534128-2019//Natural Sciences and Engineering Research Council of Canada/ ; }, mesh = {*Wildfires ; Ecosystem ; *Fires ; Forests ; }, abstract = {Fire seasons have become increasingly variable and extreme due to changing climatological, ecological, and social conditions. Earth observation data are critical for monitoring fires and their impacts. Herein, we present a whole-system framework for identifying and synthesizing fire monitoring objectives and data needs throughout the life cycle of a fire event. The four stages of fire monitoring using Earth observation data include the following: (1) pre-fire vegetation inventories, (2) active-fire monitoring, (3) post-fire assessment, and (4) multi-scale synthesis. We identify the challenges and opportunities associated with current approaches to fire monitoring, highlighting four case studies from North American boreal, montane, and grassland ecosystems. While the case studies are localized to these ecosystems and regional contexts, they provide insights for others experiencing similar monitoring challenges worldwide. The field of remote sensing is experiencing a rapid proliferation of new data sources, providing observations that can inform all aspects of our fire monitoring framework; however, significant challenges for meeting fire monitoring objectives remain. We identify future opportunities for data sharing and rapid co-development of information products using cloud computing that benefits from open-access Earth observation and other geospatial data layers.}, } @article {pmid36536803, year = {2022}, author = {Bao, G and Guo, P}, title = {Federated learning in cloud-edge collaborative architecture: key technologies, applications and challenges.}, journal = {Journal of cloud computing (Heidelberg, Germany)}, volume = {11}, number = {1}, pages = {94}, pmid = {36536803}, issn = {2192-113X}, abstract = {In recent years, with the rapid growth of edge data, the novel cloud-edge collaborative architecture has been proposed to compensate for the limited data processing power of traditional cloud computing. At the same time, in response to the public's increasing demand for data privacy, federated learning has been proposed to compensate for the limited security of traditional centralized machine learning. Deploying federated learning in a cloud-edge collaborative architecture is widely considered a promising cyber infrastructure for the future. Although cloud-edge collaboration and federated learning are each currently hot research topics, the discussion of deploying federated learning in cloud-edge collaborative architectures is still in its infancy, and little research has been conducted.
This article aims to fill this gap by providing a detailed description of the critical technologies, challenges, and applications of deploying federated learning in cloud-edge collaborative architectures, and by providing guidance on future research directions.}, } @article {pmid36534206, year = {2022}, author = {Ruifeng, L and Kai, Y and Xing, L and Xiaoli, L and Xitao, Z and Xiaocheng, G and Juan, F and Shixin, C}, title = {Extraction and spatiotemporal changes of open-pit mines during 1985-2020 using Google Earth Engine: A case study of Qingzhou City, Shandong Province, China.}, journal = {Environmental monitoring and assessment}, volume = {195}, number = {1}, pages = {209}, pmid = {36534206}, issn = {1573-2959}, support = {2019LY010//Shandong Agricultural Science and Technology Fund (Forestry Science and Technology Innovation)/ ; SJCX21_1126//Postgraduate Research & Practice Innovation Program of Jiangsu Province/ ; KYCX21_2628//Postgraduate Research & Practice Innovation Program of Jiangsu Province/ ; }, mesh = {*Search Engine ; *Environmental Monitoring/methods ; Mining ; Environment ; Cities ; China ; }, abstract = {The global use of mineral resources has increased exponentially for decades and will continue to grow for the foreseeable future, resulting in increasingly negative impacts on the surrounding environment. To date, however, there is a lack of accurate historical and current spatial extent datasets for mining areas in many parts of the world, which has hindered a more comprehensive understanding of the environmental impacts of mining. Using the Google Earth Engine cloud platform and the Landsat normalized difference vegetation index (NDVI) datasets, the spatial extents of open-pit mining areas for eight years (1985, 1990, 1995, 2000, 2005, 2010, 2015, and 2020) were extracted with the Otsu algorithm. The limestone mining areas in Qingzhou, Shandong Province, China, were selected as a case study. The annual maximum NDVI was first derived from the Landsat NDVI datasets, and the Otsu algorithm was then used to segment the annual maximum NDVI images to obtain the extent of the mining areas. Finally, the spatiotemporal characteristics of the mining areas in the study region were analyzed with reference to previous survey data. The results showed that the mining areas were primarily located in Shaozhuang Town, Wangfu Street, and the northern part of Miaozi Town, and the proportion of mining areas within these three administrative areas increased from 88% in 1985 to more than 98% in 2010. Moreover, the open-pit mining areas in Qingzhou gradually expanded from a scattered, point-like distribution to a large, contiguous distribution. From 1985 to 2020, the open-pit mining area expanded to more than 10 times its original size at a rate of 0.5 km[2]/year. In 2015, this area reached its maximum size of 19.7 km[2] and decreased slightly in 2020. Furthermore, the expansion of the mining areas in Qingzhou went through three stages: a slow growth period before 1995, a rapid expansion period from 1995 to 2005, and a shutdown and remediation period after 2005. A quantitative accuracy assessment was performed by calculating the Intersection over Union (IoU) of the extraction results and the visual interpretation results from Gaofen-2 images with 1-m spatial resolution; the IoU reached 72%. The results showed that it is feasible to threshold the Landsat annual maximum NDVI data with the Otsu algorithm to extract the annual spatial extent of open-pit mining areas.
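To illustrate the core extraction step, this small sketch thresholds an annual-maximum NDVI array with Otsu's method and scores the result with IoU (scikit-image assumed; the synthetic array stands in for the Landsat data):

    # Otsu thresholding of an annual-maximum NDVI array to flag mine areas
    # (low NDVI), plus an IoU accuracy check. Synthetic stand-in data.
    import numpy as np
    from skimage.filters import threshold_otsu

    rng = np.random.default_rng(1)
    ndvi = rng.normal(0.7, 0.08, size=(200, 200))  # vegetated background
    ndvi[60:120, 60:140] = rng.normal(0.15, 0.05, size=(60, 80))  # bare pit

    t = threshold_otsu(ndvi)
    mine_mask = ndvi < t           # open-pit areas have suppressed NDVI

    truth = np.zeros_like(mine_mask)
    truth[60:120, 60:140] = True   # reference extent (e.g., visual interpretation)

    iou = (np.logical_and(mine_mask, truth).sum()
           / np.logical_or(mine_mask, truth).sum())
    print(f"Otsu threshold = {t:.2f}, IoU = {iou:.2f}")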
Our method is easily transferable to other regions worldwide, enabling the monitoring of mine environments.}, } @article {pmid36530862, year = {2022}, author = {Tsai, CW and Lee, LY and Cheng, YP and Lin, CH and Hung, ML and Lin, JW}, title = {Integrating online meta-cognitive learning strategy and team regulation to develop students' programming skills, academic motivation, and refusal self-efficacy of Internet use in a cloud classroom.}, journal = {Universal access in the information society}, volume = {}, number = {}, pages = {1-16}, pmid = {36530862}, issn = {1615-5297}, abstract = {With the development of technology and the demand for online courses, a considerable number of online, blended, and flipped courses have been designed and offered. However, in technology-enhanced learning environments, which are also full of social networking websites, shopping websites, and free online games, it is challenging to focus students' attention and help them achieve satisfactory learning performance. In addition, the instruction of programming courses constantly challenges both teachers and students, particularly in online learning environments. To solve these problems and facilitate students' learning, the researchers in this study integrated two teaching approaches, meta-cognitive learning strategy (MCLS) and team regulation (TR), to develop students' regular learning habits and further contribute to their programming skills, academic motivation, and refusal self-efficacy of Internet use in a cloud classroom. In this research, a quasi-experiment was conducted to investigate the effects of MCLS and TR, adopting a 2 (MCLS vs. non-MCLS) × 2 (TR vs. non-TR) factorial pre-test/post-test experimental design. The participants consisted of four classes of university students from non-information or computer departments enrolled in programming design, a required course. The experimental groups comprised three of the classes, labelled G1, G2, and G3. G1 concurrently received both the online MCLS and TR interventions, while G2 only received the online MCLS intervention, and G3 only received the online TR intervention. Serving as the control group, the fourth class (G4) received traditional teaching. This study investigated the effects of MCLS, TR, and their combination on improving students' programming skills, academic motivation, and refusal self-efficacy of Internet use in an online computing course. According to the results, students who received online TR significantly enhanced their programming design skills and their refusal self-efficacy of Internet use in a cloud classroom. However, the expected effects of MCLS on developing students' programming skills, academic motivation, and refusal self-efficacy of Internet use were not found in this study.
The teaching strategy of integrating MCLS and TR in an online programming course can serve as a reference for educators conducting online, blended, or flipped courses during the COVID-19 pandemic.}, } @article {pmid36523099, year = {2022}, author = {Wang, S and Chen, B and Liang, R and Liu, L and Chen, H and Gao, M and Wu, J and Ju, W and Ho, PH}, title = {Energy-efficient workload allocation in edge-cloud fiber-wireless networks.}, journal = {Optics express}, volume = {30}, number = {24}, pages = {44186-44200}, doi = {10.1364/OE.472978}, pmid = {36523099}, issn = {1094-4087}, abstract = {To realize green computing in edge-cloud fiber-wireless networks, cooperation between edge servers and cloud servers is particularly important for reducing network energy consumption. This paper therefore proposes an energy-efficient workload allocation (EEWA) scheme that improves energy efficiency by exploiting the architecture of edge-cloud fiber-wireless networks. The feasibility of the proposed EEWA scheme was verified on our SDN testbed. We also ran simulations to obtain the optimal results for a given set of task requests. Simulation results show that our proposed EEWA scheme greatly reduces the blocking probability and the average energy consumption of task requests in edge-cloud fiber-wireless networks.}, } @article {pmid36517473, year = {2022}, author = {Ogasawara, O}, title = {Building cloud computing environments for genome analysis in Japan.}, journal = {Human genome variation}, volume = {9}, number = {1}, pages = {46}, pmid = {36517473}, issn = {2054-345X}, support = {JP19km0405501//Japan Agency for Medical Research and Development (AMED)/ ; }, abstract = {This review article describes the current status of data archiving and computational infrastructure in the field of genomic medicine, focusing primarily on the situation in Japan. I begin by introducing the status of supercomputer operations in Japan, where a high-performance computing infrastructure (HPCI) is operated to meet the diverse computational needs of science in general. Since this HPCI consists of supercomputers of various architectures located across the nation and connected via a high-speed network, including supercomputers specialized for genome science, I explore how it is responding to the explosive increase in genomic data, including the International Nucleotide Sequence Database Collaboration (INSDC) data archive. Separately, since the use of commercial cloud computing environments clearly needs to be promoted, both in light of the rapid increase in computing demands and to support international data sharing and international data analysis projects, I explain how the Japanese government has established a series of guidelines for the use of cloud computing based on its cybersecurity strategy and has begun to build a government cloud for government agencies. I also carefully consider several other issues of user concern.
Finally, I show how Japan's major cloud computing infrastructure is currently evolving toward a multicloud and hybrid cloud configuration.}, } @article {pmid36516515, year = {2023}, author = {Zhou, Y and Luo, B and Sang, J and Li, C and Zhu, M and Zhu, Z and Dai, J and Wang, J and Chen, H and Zhai, S and Lu, L and Liu, H and Yu, G and Ye, J and Zhang, Z and Huan, J}, title = {A cloud-based consultation and collaboration system for radiotherapy: Remote decision support services for community radiotherapy centers.}, journal = {Computer methods and programs in biomedicine}, volume = {229}, number = {}, pages = {107270}, doi = {10.1016/j.cmpb.2022.107270}, pmid = {36516515}, issn = {1872-7565}, mesh = {Humans ; *Radiotherapy, Intensity-Modulated/methods ; Radiotherapy Planning, Computer-Assisted/methods ; Cloud Computing ; Radiometry ; Computer Simulation ; Radiotherapy Dosage ; }, abstract = {PURPOSE: This study aimed to establish a cloud-based radiotherapy consultation and collaboration system and then investigate the practicability of remote decision support for community radiotherapy centers using the system.

METHODS AND MATERIALS: A cloud-based consultation and collaboration system for radiotherapy, OncoEvidance®, was developed to provide remote services of LINAC modeling, simulation CT data import/export, target volume and organ-at-risk delineation, prescription, and treatment planning. The system was deployed on a hybrid cloud. A federation of public nodes, each corresponding to a medical institution, is managed by a central node where a group of consultants have registered. Users can access the system over the network from their computing devices. The system has been tested at three community radiotherapy centers. One accelerator was modeled. Twelve consultants participated in the remote radiotherapy decision support, and 77 radiation treatment plans were evaluated remotely.
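A minimal, hypothetical sketch of the hub-and-spoke arrangement this METHODS paragraph describes, in which a central node holds the consultant registry and routes planning requests arriving from federated community-center nodes; all class, field, and consultant names below are invented for illustration and are not part of OncoEvidance®:

```python
# Hypothetical sketch: a central node registering consultants and routing
# remote planning requests from federated community-center nodes.
from dataclasses import dataclass, field

@dataclass
class CentralNode:
    consultants: list = field(default_factory=list)

    def register(self, name: str) -> None:
        # consultants register once with the central node
        self.consultants.append(name)

    def assign(self, case: dict) -> str:
        # trivial round-robin assignment of a remote planning request
        consultant = self.consultants[case["id"] % len(self.consultants)]
        return f"case {case['id']} from {case['center']} -> {consultant}"

hub = CentralNode()
for name in ["consultant_A", "consultant_B"]:
    hub.register(name)
print(hub.assign({"id": 7, "center": "community_center_1"}))
# case 7 from community_center_1 -> consultant_B
```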

RESULTS: All per-beam dose verification passing rates were > 94%, and all composite beam dose verification passing rates were > 99%. The average time to download one patient's set of simulation CT data from the Internet was within 1 min, given a cloud download bandwidth of 8 Mbps and a local network bandwidth of 100 Mbps. The average response time for one consultant to contour target volumes and write a prescription was about 24 h, and that for one consultant to design and optimize an IMRT treatment plan was about 36 h. 100% of the remote plans passed the dosimetric criteria and could be imported into the local TPS for further verification.

CONCLUSION: The cloud-based consultation and collaboration system saved the travel time for consultants and provided high quality radiotherapy to patients in community centers. The under-staffed community radiotherapy centers could benefit from the remote system with lower cost and better treatment quality control.}, } @article {pmid36515465, year = {2023}, author = {Wiewiórka, M and Szmurło, A and Stankiewicz, P and Gambin, T}, title = {Cloud-native distributed genomic pileup operations.}, journal = {Bioinformatics (Oxford, England)}, volume = {39}, number = {1}, pages = {}, pmid = {36515465}, issn = {1367-4811}, support = {//Research University/ ; }, mesh = {*Software ; *Genomics/methods ; Algorithms ; Genome ; Computational Biology/methods ; }, abstract = {MOTIVATION: Pileup analysis is a building block of many bioinformatics pipelines, including variant calling and genotyping. This step tends to become a bottleneck of the entire assay since the straightforward pileup implementations involve processing of all base calls from all alignments sequentially. On the other hand, a distributed version of the algorithm faces the intrinsic challenge of splitting reads-oriented file formats into self-contained partitions to avoid costly data exchange between computational nodes.
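To make the bottleneck in this MOTIVATION concrete, a deliberately naive single-threaded pileup over toy reads might look as follows; this is a simplification (no CIGAR operations, base qualities, or multiple contigs), not SeQuiLa's algorithm:

```python
# Naive sequential pileup: every base call of every read is visited once,
# which is exactly the access pattern that bottlenecks large alignment sets.
from collections import Counter, defaultdict

def sequential_pileup(alignments):
    """alignments: iterable of (start_position, read_sequence) tuples
    against a single reference contig."""
    pileup = defaultdict(Counter)
    for start, seq in alignments:
        for offset, base in enumerate(seq):
            pileup[start + offset][base] += 1
    return pileup

reads = [(100, "ACGT"), (102, "GTTA"), (103, "TTAC")]
depths = sequential_pileup(reads)
for pos in sorted(depths):
    print(pos, dict(depths[pos]))  # e.g. 103 {'T': 3}
```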

RESULTS: Here, we present a scalable, distributed and efficient implementation of a pileup algorithm that is suitable for deploying in cloud computing environments. In particular, we implemented: (i) our custom data-partitioning algorithm optimized to work with the alignment reads, (ii) a novel and unique approach to process alignment events from sequencing reads using the MD tags, (iii) the source code micro-optimizations for recurrent operations, and (iv) a modular structure of the algorithm. We have proven that our novel approach consistently and significantly outperforms other state-of-the-art distributed tools in terms of execution time (up to 6.5× faster) and memory usage (up to 2× less), resulting in a substantial cloud cost reduction. SeQuiLa is a cloud-native solution that can be easily deployed using any managed Kubernetes and Hadoop services available in public clouds, like Microsoft Azure Cloud, Google Cloud Platform, or Amazon Web Services. Together with the already implemented distributed range join and coverage calculations, our package provides end-users with a unified SQL interface for convenient analyses of population-scale genomic data in an interactive way.
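The "unified SQL interface" idea can be pictured with a toy Spark SQL query that expands interval-style reads into per-position depth counts. This sketch uses plain PySpark with invented table and column names; it is not SeQuiLa's actual schema or API:

```python
# Toy per-position coverage via Spark SQL (illustrative, not SeQuiLa's API).
from pyspark.sql import SparkSession

spark = SparkSession.builder.appName("toy-coverage-sql").getOrCreate()

reads = spark.createDataFrame(
    [("chr1", 100, 104), ("chr1", 102, 106), ("chr1", 103, 107)],
    ["contig", "pos_start", "pos_end"],
)
reads.createOrReplaceTempView("reads")

# Expand each read into the positions it covers, then aggregate depth.
coverage = spark.sql("""
    SELECT contig, pos, COUNT(*) AS depth
    FROM reads
    LATERAL VIEW explode(sequence(pos_start, pos_end)) t AS pos
    GROUP BY contig, pos
    ORDER BY contig, pos
""")
coverage.show()
spark.stop()
```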

https://biodatageeks.github.io/sequila/.}, } @article {pmid36512073, year = {2022}, author = {Paul, A and K S, V and Sood, A and Bhaumik, S and Singh, KA and Sethupathi, S and Chanda, A}, title = {Suspended Particulate Matter Analysis of Pre and During Covid Lockdown Using Google Earth Engine Cloud Computing: A Case Study of Ukai Reservoir.}, journal = {Bulletin of environmental contamination and toxicology}, volume = {110}, number = {1}, pages = {7}, pmid = {36512073}, issn = {1432-0800}, mesh = {Humans ; *Particulate Matter/analysis ; Cloud Computing ; Search Engine ; *COVID-19 ; Communicable Disease Control ; }, abstract = {Presence of suspended particulate matter (SPM) in a waterbody or a river can be caused by multiple parameters such as other pollutants by the discharge of poorly maintained sewage, siltation, sedimentation, flood and even bacteria. In this study, remote sensing techniques were used to understand the effects of pandemic-induced lockdown on the SPM concentration in the lower Tapi reservoir or Ukai reservoir. The estimation was done using Landsat-8 OLI (Operational Land Imager) having radiometric resolution (12-bit) and a spatial resolution of 30 m. The Google Earth Engine (GEE) cloud computing platform was used in this study to generate the products. The GEE is a semi-automated workflow system using a robust approach designed for scientific analysis and visualization of geospatial datasets. An algorithm was deployed, and a time-series (2013-2020) analysis was done for the study area. It was found that the average mean value of SPM in Tapi River during 2020 is lowest than the last seven years at the same time.}, } @article {pmid36508783, year = {2023}, author = {Xu, X and Li, L and Zhou, H and Fan, M and Wang, H and Wang, L and Hu, Q and Cai, Q and Zhu, Y and Ji, S}, title = {MRTCM: A comprehensive dataset for probabilistic risk assessment of metals and metalloids in traditional Chinese medicine.}, journal = {Ecotoxicology and environmental safety}, volume = {249}, number = {}, pages = {114395}, doi = {10.1016/j.ecoenv.2022.114395}, pmid = {36508783}, issn = {1090-2414}, mesh = {Animals ; *Metals, Heavy/toxicity/analysis ; Medicine, Chinese Traditional ; *Metalloids/analysis ; *Mercury/analysis ; Risk Assessment ; Carcinogens/analysis ; Environmental Monitoring/methods ; }, abstract = {Traditional Chinese medicine (TCM) is still considered a global complementary or alternative medical system, but exogenous hazardous contaminants remain in TCM even after decocting. Besides, it is time-consuming to conduct a risk assessment of trace elements in TCMs with a non-automatic approach due to the wide variety of TCMs. Here, we present MRTCM, a cloud-computing infrastructure for automating the probabilistic risk assessment of metals and metalloids in TCM. MRTCM includes a consumption database and a pollutant database involving forty million rows of consumption data and fourteen types of TCM potentially toxic elements concentrations. The algorithm of probabilistic risk assessment was also packaged in MRTCM to assess the risks of eight elements with Monte Carlo simulation. The results demonstrated that 96.64% and 99.46% had no non-carcinogenic risk (hazard indices (HI) were < 1.0) for animal and herbal medicines consumers, respectively. After twenty years of exposure, less than 1% of the total carcinogenic risk (CRt) was > 10[-4] for TCM consumers, indicating that they are at potential risk for carcinogenicity. 
Sensitivity analysis revealed that annual consumption and concentration were the main variables affecting the assessment results. Ultimately, a priority management list of TCMs was also generated, indicating that more attention should be paid to the non-carcinogenic risks of As, Mn, and Hg and the carcinogenic risks of As and Cr in Pheretima and Cr in Arcae Conch. In general, MRTCM could significantly enhance the efficiency of risk assessment in TCM and provide reasonable guidance for policymakers to optimize risk management.}, } @article {pmid36506615, year = {2022}, author = {Zahid, MA and Shafiq, B and Shamail, S and Afzal, A and Vaidya, J}, title = {BP-DEBUG: A Fault Debugging and Resolution Tool for Business Processes.}, journal = {Proceedings. International Conference on Distributed Computing Systems}, volume = {2022}, number = {}, pages = {1306-1309}, pmid = {36506615}, issn = {2575-8411}, support = {R01 GM118574/GM/NIGMS NIH HHS/United States ; R35 GM134927/GM/NIGMS NIH HHS/United States ; }, abstract = {Cloud computing and Internet-ware software paradigm have enabled rapid development of distributed business process (BP) applications. Several tools are available to facilitate automated/ semi-automated development and deployment of such distributed BPs by orchestrating relevant service components in a plug-and-play fashion. However, the BPs developed using such tools are not guaranteed to be fault-free. In this demonstration, we present a tool called BP-DEBUG for debugging and automated repair of faulty BPs. BP-DEBUG implements our Collaborative Fault Resolution (CFR) approach that utilizes the knowledge of existing BPs with a similar set of web services fault detection and resolution in a given user BP. Essentially, CFR attempts to determine any semantic and structural differences between a faulty BP and related BPs and computes a minimum set of transformations which can be used to repair the faulty BP. Demo url: https://youtu.be/mf49oSekLOA.}, } @article {pmid36506593, year = {2022}, author = {Silversmith, W and Zlateski, A and Bae, JA and Tartavull, I and Kemnitz, N and Wu, J and Seung, HS}, title = {Igneous: Distributed dense 3D segmentation meshing, neuron skeletonization, and hierarchical downsampling.}, journal = {Frontiers in neural circuits}, volume = {16}, number = {}, pages = {977700}, pmid = {36506593}, issn = {1662-5110}, support = {R01 NS104926/NS/NINDS NIH HHS/United States ; U01 MH117072/MH/NIMH NIH HHS/United States ; U19 NS104648/NS/NINDS NIH HHS/United States ; RF1 MH117815/MH/NIMH NIH HHS/United States ; U01 MH114824/MH/NIMH NIH HHS/United States ; R01 EY027036/EY/NEI NIH HHS/United States ; }, mesh = {*Imaging, Three-Dimensional/methods ; Microscopy, Electron ; *Neurons ; Information Storage and Retrieval ; Image Processing, Computer-Assisted/methods ; }, abstract = {Three-dimensional electron microscopy images of brain tissue and their dense segmentations are now petascale and growing. These volumes require the mass production of dense segmentation-derived neuron skeletons, multi-resolution meshes, image hierarchies (for both modalities) for visualization and analysis, and tools to manage the large amount of data. However, open tools for large-scale meshing, skeletonization, and data management have been missing. Igneous is a Python-based distributed computing framework that enables economical meshing, skeletonization, image hierarchy creation, and data management using cloud or cluster computing that has been proven to scale horizontally. 
We sketch Igneous's computing framework, show how to use it, and characterize its performance and data storage.}, } @article {pmid36502208, year = {2022}, author = {Buriboev, A and Muminov, A}, title = {Computer State Evaluation Using Adaptive Neuro-Fuzzy Inference Systems.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {23}, pages = {}, pmid = {36502208}, issn = {1424-8220}, mesh = {Humans ; *Fuzzy Logic ; *Neural Networks, Computer ; Algorithms ; Computers ; }, abstract = {Several crucial system design and deployment decisions, including workload management, sizing, capacity planning, and dynamic rule generation in dynamic systems such as computers, depend on predictive analysis of resource consumption. An analysis of the computer components' utilizations and their workloads is the best way to assess the performance of the computer's state. Especially, analyzing the particular or whole influence of components on another component gives more reliable information about the state of computer systems. There are many evaluation techniques proposed by researchers. The bulk of them have complicated metrics and parameters such as utilization, time, throughput, latency, delay, speed, frequency, and the percentage which are difficult to understand and use in the assessing process. According to these, we proposed a simplified evaluation method using components' utilization in percentage scale and its linguistic values. The use of the adaptive neuro-fuzzy inference system (ANFIS) model and fuzzy set theory offers fantastic prospects to realize use impact analyses. The purpose of the study is to examine the usage impact of memory, cache, storage, and bus on CPU performance using the Sugeno type and Mamdani type ANFIS models to determine the state of the computer system. The suggested method is founded on keeping an eye on how computer parts behave. The developed method can be applied for all kinds of computing system, such as personal computers, mainframes, and supercomputers by considering that the inference engine of the proposed ANFIS model requires only its own behavior data of computers' components and the number of inputs can be enriched according to the type of computer, for instance, in cloud computers' case the added number of clients and network quality can be used as the input parameters. The models present linguistic and quantity results which are convenient to understand performance issues regarding specific bottlenecks and determining the relationship of components.}, } @article {pmid36502177, year = {2022}, author = {Mei, P and Karimi, HR and Chen, F and Yang, S and Huang, C and Qiu, S}, title = {A Learning-Based Vehicle-Cloud Collaboration Approach for Joint Estimation of State-of-Energy and State-of-Health.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {23}, pages = {}, pmid = {36502177}, issn = {1424-8220}, mesh = {United States ; Bayes Theorem ; Physical Phenomena ; *Electric Power Supplies ; *Electricity ; Neural Networks, Computer ; }, abstract = {The state-of-energy (SOE) and state-of-health (SOH) are two crucial quotas in the battery management systems, whose accurate estimation is facing challenges by electric vehicles' (EVs) complexity and changeable external environment. Although the machine learning algorithm can significantly improve the accuracy of battery estimation, it cannot be performed on the vehicle control unit as it requires a large amount of data and computing power. 
This paper proposes a joint SOE and SOH prediction algorithm, which combines long short-term memory (LSTM), Bi-directional LSTM (Bi-LSTM), and convolutional neural networks (CNNs) for EVs based on vehicle-cloud collaboration. Firstly, the indicator of battery performance degradation is extracted for SOH prediction according to the historical data; the Bayesian optimization approach is applied to the SOH prediction combined with Bi-LSTM. Then, the CNN-LSTM is implemented to provide direct and nonlinear mapping models for SOE. These direct mapping models avoid parameter identification and updating, which are applicable in cases with complex operating conditions. Finally, the SOH correction in SOE estimation achieves the joint estimation with different time scales. With the validation of the National Aeronautics and Space Administration battery data set, as well as the established battery platform, the error of the proposed method is kept within 3%. The proposed vehicle-cloud approach performs high-precision joint estimation of battery SOE and SOH. It can not only use the battery historical data of the cloud platform to predict the SOH but also correct the SOE according to the predicted value of the SOH. The feasibility of vehicle-cloud collaboration is promising in future battery management systems.}, } @article {pmid36502107, year = {2022}, author = {Jing, X and Tian, X and Du, C}, title = {LPAI-A Complete AIoT Framework Based on LPWAN Applicable to Acoustic Scene Classification Scenarios.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {23}, pages = {}, pmid = {36502107}, issn = {1424-8220}, support = {XDC02070800//Chinese Academy of Sciences(CAS)/ ; 22511100600//The Science and Technology Commission of Shanghai Municipality (STCSM)/ ; }, mesh = {Animals ; *Artificial Intelligence ; Acoustics ; Computer Simulation ; Reaction Time ; Recognition, Psychology ; *Ursidae ; }, abstract = {Deploying artificial intelligence on edge nodes of Low-Power Wide Area Networks can significantly reduce network transmission volumes, event response latency, and overall network power consumption. However, the edge nodes in LPWAN bear limited computing power and storage space, and researchers have found it challenging to improve the recognition capability of the nodes using sensor data from the environment. In particular, the domain-shift problem in LPWAN is challenging to overcome. In this paper, a complete AIoT system framework referred to as LPAI is presented. It is the first generic framework for implementing AIoT technology based on LPWAN applicable to acoustic scene classification scenarios. LPAI overcomes the domain-shift problem, which enables resource-constrained edge nodes to continuously improve their performance using real data to become more adaptive to the environment. For efficient use of limited resources, the edge nodes independently select representative data and transmit it back to the cloud. Moreover, the model is iteratively retrained on the cloud using the few-shot uploaded data. 
Finally, the feasibility of LPAI is analyzed, and simulation experiments on the public ASC dataset provide validation that our proposed framework can improve the recognition accuracy by 5% using as few as 85 actual sensor data points.}, } @article {pmid36501960, year = {2022}, author = {Wan, S and Zhao, K and Lu, Z and Li, J and Lu, T and Wang, H}, title = {A Modularized IoT Monitoring System with Edge-Computing for Aquaponics.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {23}, pages = {}, pmid = {36501960}, issn = {1424-8220}, support = {61871380//National Natural Science Foundation of China/ ; 21327401D-1//Key Common Technologies for High-quality Agricultural Development/ ; }, mesh = {Animals ; *Plant Breeding ; *Electrocardiography ; Aquaculture/methods ; Algorithms ; }, abstract = {Aquaponics is a green and efficient agricultural production model that combines aquaculture and vegetable cultivation. It is worth looking into optimizing the proportion of fish and plants to improve the quality and yield. However, there is little non-destructive monitoring of plant growth in aquaponics monitoring systems currently. In this paper, based on the Internet of Things technologies, a monitoring system is designed with miniaturization, modularization, and low-cost features for cultivation-breeding ratio research. The system can realize remote monitoring and intelligent control of parameters needed to keep fish and plants under optimal conditions. First, a 32-bit chip is used as the Microcontroller Unit to develop the intelligent sensing unit, which can realize 16 different data acquisitions as stand-alone extensible modules. Second, to achieve plant data acquisition and upload, the Raspberry Pi embedded with image processing algorithms is introduced to realize edge-computing. Finally, all the collected data is stored in the Ali-cloud through Wi-Fi and a WeChat Mini Program is designed to display data and control devices. The results show that there is no packet loss within 90 m for wireless transmission, and the error rate of environment parameters is limited to 5%. It was proven that the system is intelligent, flexible, low-cost, and stable, which makes it well suited for small-scale aquaponics.}, } @article {pmid36501875, year = {2022}, author = {Wu, TY and Kong, F and Wang, L and Chen, YC and Kumari, S and Pan, JS}, title = {Toward Smart Home Authentication Using PUF and Edge-Computing Paradigm.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {23}, pages = {}, pmid = {36501875}, issn = {1424-8220}, mesh = {*Cloud Computing ; *Communication ; Internet ; Nonoxynol ; Privacy ; }, abstract = {The smart home is a crucial embodiment of the internet of things (IoT), which can facilitate users to access smart home services anytime and anywhere. Due to the limited resources of cloud computing, it cannot meet users' real-time needs. Therefore, edge computing emerges as the times require, providing users with better real-time access and storage. The application of edge computing in the smart home environment can enable users to enjoy smart home services. However, users and smart devices communicate through public channels, and malicious attackers may intercept information transmitted through public channels, resulting in user privacy disclosure. Therefore, it is a critical issue to protect the secure communication between users and smart devices in the smart home environment.
Furthermore, authentication protocols in smart home environments also face security challenges. In this paper, we propose an anonymous authentication protocol that applies edge computing to the smart home environment to protect communication security between entities. To protect the security of smart devices, we embed physical unclonable functions (PUF) into each smart device. The real-or-random model, informal security analysis, and ProVerif are adopted to verify the security of our protocol. Finally, we compare our protocol with existing protocols regarding security and performance. The comparison results demonstrate that our protocol has higher security and slightly better performance.}, } @article {pmid36501855, year = {2022}, author = {Li, P and Cao, J}, title = {A Virtual Machine Consolidation Algorithm Based on Dynamic Load Mean and Multi-Objective Optimization in Cloud Computing.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {23}, pages = {}, pmid = {36501855}, issn = {1424-8220}, support = {62172089//National Natural Science Foundation of China/ ; }, abstract = {High energy consumption and low resource utilization have become increasingly prominent problems in cloud data centers. Virtual machine (VM) consolidation is the key technology to solve the problems. However, excessive VM consolidation may lead to service level agreement violations (SLAv). Most studies have focused on optimizing energy consumption and ignored other factors. An effective VM consolidation should comprehensively consider multiple factors, including the quality of service (QoS), energy consumption, resource utilization, migration overhead and network communication overhead, which is a multi-objective optimization problem. To solve the problems above, we propose a VM consolidation approach based on dynamic load mean and multi-objective optimization (DLMM-VMC), which aims to minimize power consumption, resource waste, migration overhead and network communication overhead while ensuring QoS. First, based on multi-dimensional resources consideration, the host load status is objectively evaluated by using the proposed host load detection algorithm based on the dynamic load mean to avoid an excessive VM consolidation. Then, the best solution is obtained based on the proposed multi-objective optimization model and optimized ant colony algorithm, so as to ensure the common interests of cloud service providers and users. Finally, the experimental results show that compared with the existing VM consolidation methods, our proposed algorithm has a significant improvement in the energy consumption, QoS, resource waste, SLAv, migration and network overhead.}, } @article {pmid36501828, year = {2022}, author = {Marcillo, P and Tamayo-Urgilés, D and Valdivieso Caraguay, ÁL and Hernández-Álvarez, M}, title = {Security in V2I Communications: A Systematic Literature Review.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {23}, pages = {}, pmid = {36501828}, issn = {1424-8220}, support = {PIS 20-02//National Polytechnic School/ ; }, mesh = {*Computer Security ; *Confidentiality ; Cloud Computing ; Computer Communication Networks ; Communication ; }, abstract = {Recently, the number of vehicles equipped with wireless connections has increased considerably. The impact of that growth in areas such as telecommunications, infotainment, and automatic driving is enormous.
More and more drivers want to be part of a vehicular network, despite the implications or risks that, for instance, the openness of wireless communications, its dynamic topology, and its considerable size may bring. Undoubtedly, this trend is because of the benefits the vehicular network can offer. Generally, a vehicular network has two modes of communication (V2I and V2V). The advantage of V2I over V2V is roadside units' high computational and transmission power, which assures the functioning of early warning and driving guidance services. This paper aims to discover the principal vulnerabilities and challenges in V2I communications, the tools and methods to mitigate those vulnerabilities, the evaluation metrics to measure the effectiveness of those tools and methods, and based on those metrics, the methods or tools that provide the best results. Researchers have identified the non-resistance to attacks, the regular updating and exposure of keys, and the high dependence on certification authorities as main vulnerabilities. Thus, the authors found schemes resistant to attacks, authentication schemes, privacy protection models, and intrusion detection and prevention systems. Of the solutions for providing security analyzed in this review, the authors determined that most of them use metrics such as computational cost and communication overhead to measure their performance. Additionally, they determined that the solutions that use emerging technologies such as fog/edge/cloud computing present better results than the rest. Finally, they established that the principal challenge in V2I communication is to protect and dispose of a safe and reliable communication channel to avoid adversaries taking control of the medium.}, } @article {pmid36501767, year = {2022}, author = {Hung, YH}, title = {Developing an Improved Ensemble Learning Approach for Predictive Maintenance in the Textile Manufacturing Process.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {23}, pages = {}, pmid = {36501767}, issn = {1424-8220}, support = {MOST 110-2221-E-224 -047,MOST 111-2221-E-224 -033 -MY2.//Ministry of Science and Technology/ ; }, mesh = {*Machine Learning ; *Algorithms ; Data Science ; Cloud Computing ; Automation ; }, abstract = {With the rapid development of digital transformation, paper forms are digitalized as electronic forms (e-Forms). Existing data can be applied in predictive maintenance (PdM) for the enabling of intelligentization and automation manufacturing. This study aims to enhance the utilization of collected e-Form data though machine learning approaches and cloud computing to predict and provide maintenance actions. The ensemble learning approach (ELA) requires less computation time and has a simple hardware requirement; it is suitable for processing e-form data with specific attributes. This study proposed an improved ELA to predict the defective class of product data from a manufacturing site's work order form. This study proposed the resource dispatching approach to arrange data with the corresponding emailing resource for automatic notification. This study's novelty is the integration of cloud computing and an improved ELA for PdM to assist the textile product manufacturing process. The data analytics results show that the improved ensemble learning algorithm has over 98% accuracy and precision for defective product prediction. 
The validation results of the dispatching approach show that data can be correctly transmitted in a timely manner to the corresponding resource, along with a notification being sent to users.}, } @article {pmid36501737, year = {2022}, author = {Gul, OM}, title = {Heuristic Resource Reservation Policies for Public Clouds in the IoT Era.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {23}, pages = {}, pmid = {36501737}, issn = {1424-8220}, mesh = {*Cloud Computing ; *Algorithms ; Policy ; }, abstract = {With the advances in the IoT era, the number of wireless sensor devices has been growing rapidly. This increasing number gives rise to more complex networks where more complex tasks can be executed by utilizing more computational resources from the public clouds. Cloud service providers use various pricing models for their offered services. Some models are appropriate for the cloud service user's short-term requirements whereas the other models are appropriate for the long-term requirements of cloud service users. Reservation-based price models are suitable for long-term requirements of cloud service users. We used the pricing schemes with spot and reserved instances. Reserved instances support a hybrid cost model with fixed reservation costs that vary with contract duration and an hourly usage charge which is lower than the charge of the spot instances. Optimizing resources to be reserved requires sufficient research effort. Recent algorithms proposed for this problem are generally based on integer programming problems, so they do not have polynomial time complexity. In this work, heuristic-based polynomial time policies are proposed for this problem. It is exhibited that the cost for the cloud service user which uses our approach is comparable to optimal solutions, i.e., it is near-optimal.}, } @article {pmid36500810, year = {2022}, author = {Malik, S and Dhasmana, A and Preetam, S and Mishra, YK and Chaudhary, V and Bera, SP and Ranjan, A and Bora, J and Kaushik, A and Minkina, T and Jatav, HS and Singh, RK and Rajput, VD}, title = {Exploring Microbial-Based Green Nanobiotechnology for Wastewater Remediation: A Sustainable Strategy.}, journal = {Nanomaterials (Basel, Switzerland)}, volume = {12}, number = {23}, pages = {}, pmid = {36500810}, issn = {2079-4991}, abstract = {Water scarcity due to contamination of water resources with different inorganic and organic contaminants is one of the foremost global concerns. It is due to rapid industrialization, fast urbanization, and the low efficiency of traditional wastewater treatment strategies. Conventional water treatment strategies, including chemical precipitation, membrane filtration, coagulation, ion exchange, solvent extraction, adsorption, and photolysis, are based on adopting various nanomaterials (NMs) with a high surface area, including carbon NMs, polymers, metals-based, and metal oxides. However, significant bottlenecks are toxicity, cost, secondary contamination, size and space constraints, energy efficiency, prolonged time consumption, output efficiency, and scalability. On the contrary, green NMs fabricated using microorganisms emerge as cost-effective, eco-friendly, sustainable, safe, and efficient substitutes for these traditional strategies. 
This review summarizes the state-of-the-art microbial-assisted green NMs and strategies, including microbial cells, magnetotactic bacteria (MTB), bio-augmentation, and integrated bioreactors, for removing an extensive range of water contaminants, addressing the challenges associated with traditional strategies. Furthermore, a comparative analysis of the efficacies of the microbe-assisted green NM-based water remediation strategy with the traditional practices in light of crucial factors like reusability, regeneration, removal efficiency, and adsorption capacity has been presented. The associated challenges, their alternate solutions, and the cutting-edge prospects of microbial-assisted green nanobiotechnology with the integration of advanced tools including internet-of-nano-things, cloud computing, and artificial intelligence have been discussed. This review opens a new window to assist future research dedicated to sustainable and green nanobiotechnology-based strategies for environmental remediation applications.}, } @article {pmid36497649, year = {2022}, author = {Vărzaru, AA}, title = {Assessing Digital Transformation of Cost Accounting Tools in Healthcare.}, journal = {International journal of environmental research and public health}, volume = {19}, number = {23}, pages = {}, pmid = {36497649}, issn = {1660-4601}, mesh = {Artificial Intelligence ; Delivery of Health Care ; *Accounting ; *Blockchain ; Big Data ; }, abstract = {The expansion of digital technologies has significantly changed most economic activities and professions. Digital technologies penetrated managerial accounting and have a vast potential to transform this profession. Implementing emerging digital technologies, such as artificial intelligence, blockchain, the Internet of Things, big data, and cloud computing, can trigger a crucial leap forward, leading to a paradigm shift in healthcare organizations' accounting management. The paper's main objective is to investigate the perception of Romanian accountants on implementing digital technologies in healthcare organizations' accounting management. The paper presents a study based on a questionnaire among Romanian accountants who use various digital technologies implemented in traditional and innovative cost accounting tools. Based on structural equation modeling, the results emphasize the prevalence of innovative tools over traditional cost accounting tools improved through digital transformation, digital technologies assuming the most complex and time-consuming tasks. Moreover, the influence of cost accounting tools improved through digital transformation on healthcare organizations' performance is much more robust in the case of innovative tools than in the case of traditional cost accounting tools.
The proposed model provides managers in healthcare organizations with information on the most effective methods in the context of digital transformation.}, } @article {pmid36495459, year = {2023}, author = {Contaldo, SG and Alessandri, L and Colonnelli, I and Beccuti, M and Aldinucci, M}, title = {Bringing Cell Subpopulation Discovery on a Cloud-HPC Using rCASC and StreamFlow.}, journal = {Methods in molecular biology (Clifton, N.J.)}, volume = {2584}, number = {}, pages = {337-345}, pmid = {36495459}, issn = {1940-6029}, mesh = {*Software ; *Algorithms ; Workflow ; High-Throughput Nucleotide Sequencing ; Single-Cell Analysis ; Sequence Analysis, RNA ; }, abstract = {The idea behind novel single-cell RNA sequencing (scRNA-seq) pipelines is to isolate single cells through microfluidic approaches and generate sequencing libraries in which the transcripts are tagged to track their cell of origin. Modern scRNA-seq platforms are capable of analyzing up to many thousands of cells in each run. Then, combined with massive high-throughput sequencing producing billions of reads, scRNA-seq allows the assessment of fundamental biological properties of cell populations and biological systems at unprecedented resolution.In this chapter, we describe how cell subpopulation discovery algorithms, integrated into rCASC, could be efficiently executed on cloud-HPC infrastructure. To achieve this task, we focus on the StreamFlow framework which provides container-native runtime support for scientific workflows in cloud/HPC environments.}, } @article {pmid36472895, year = {2022}, author = {Barbaric, A and Munteanu, C and Ross, H and Cafazzo, JA}, title = {Design of a Patient Voice App Experience for Heart Failure Management: Usability Study.}, journal = {JMIR formative research}, volume = {6}, number = {12}, pages = {e41628}, pmid = {36472895}, issn = {2561-326X}, abstract = {BACKGROUND: The use of digital therapeutics (DTx) in the prevention and management of medical conditions has increased through the years, with an estimated 44 million people using one as part of their treatment plan in 2021, nearly double the number from the previous year. DTx are commonly accessed through smartphone apps, but offering these treatments through additional platforms can improve the accessibility of these interventions. Voice apps are an emerging technology in the digital health field; not only do they have the potential to improve DTx adherence, but they can also create a better user experience for some user groups.

OBJECTIVE: This research aimed to identify the acceptability and feasibility of offering a voice app for a chronic disease self-management program. The objective of this project was to design, develop, and evaluate a voice app of an already-existing smartphone-based heart failure self-management program, Medly, to be used as a case study.

METHODS: A voice app version of Medly was designed and developed through a user-centered design process. We conducted a usability study and semistructured interviews with patients with heart failure (N=8) at the Peter Munk Cardiac Clinic in Toronto General Hospital to better understand the user experience. A Medly voice app prototype was built using a software development kit in tandem with a cloud computing platform and was verified and validated before the usability study. Data collection and analysis were guided by a mixed methods triangulation convergence design.
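For a sense of what such an SDK-based prototype involves, a voice-app skill reduces to a handler mapping recognized intents and slots to spoken responses and backend calls. The sketch below is hypothetical; the intent and slot names are invented and are not Medly's:

```python
# Hypothetical voice-app intent handler (invented intent/slot names).
def handle_intent(intent: str, slots: dict) -> str:
    if intent == "ReportWeightIntent":
        try:
            weight = float(slots["weight_kg"])
        except (KeyError, ValueError):
            return "Sorry, I didn't catch your weight. Please repeat it."
        # A real DTx backend would persist the reading to the cloud platform
        # and run the program's clinical algorithm on it before responding.
        return f"Thanks, I recorded {weight} kilograms for today."
    return "Sorry, I can't help with that yet."

print(handle_intent("ReportWeightIntent", {"weight_kg": "82.5"}))
# Thanks, I recorded 82.5 kilograms for today.
```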

RESULTS: Common themes were identified in the results of the usability study, which involved 8 participants with heart failure. Almost all participants (7/8, 88%) were satisfied with the voice app and felt confident using it, although half of the participants (4/8, 50%) were unsure about using it in the future. Six main themes were identified: changes in physical behavior, preference between voice app and smartphone, importance of music during voice app interaction, lack of privacy concerns, desired reassurances during voice app interaction, and helpful aids during voice app interaction. These findings were triangulated with the quantitative data, and it was concluded that the main area for improvement was ease of use; design changes were then implemented to improve the user experience.

CONCLUSIONS: This work offered preliminary insight into the acceptability and feasibility of a Medly voice app. Given the recent emergence of voice apps in health care, we believe that this research offered invaluable insight into successfully deploying DTx for chronic disease self-management using this technology.}, } @article {pmid36470948, year = {2022}, author = {Zhao, S and Guo, X and Qu, Z and Zhang, Z and Yu, T}, title = {Intelligent retrieval method for power grid operation data based on improved SimHash and multi-attribute decision making.}, journal = {Scientific reports}, volume = {12}, number = {1}, pages = {20994}, pmid = {36470948}, issn = {2045-2322}, abstract = {In the trend of the energy revolution, power data has become one of the key elements of the power grid, and an advanced power system with "electric power + computing power" as the core has become an inevitable choice. However, the traditional search approach based on directory query is still commonly used for power grid operation data both domestically and internationally. The approach fails to effectively meet the user's need for fast, accurate and personalized retrieval of useful information from the vast amount of power grid data. It seriously affects the real-time availability of data and the efficiency of business-critical analytical decisions. For this reason, an intelligent retrieval approach for power grid operation data based on improved SimHash and multi-attribute decision making is proposed in this paper. This method elaborates the properties of SimHash and multi-attribute decision-making algorithms, and an intelligent parallel retrieval algorithm, MR-ST, based on the MapReduce model is designed. Finally, as an example, real-time grid operation data from multiple sources are analyzed on the cloud platform. The experimental results show the effectiveness and precision of the method. Compared with traditional methods, the search accuracy rate, search completion rate and search time are significantly improved. Experiments show that the method can be applied to intelligent retrieval of power grid operation data.}, } @article {pmid36470698, year = {2023}, author = {Lee, P and Tahmasebi, A and Dave, JK and Parekh, MR and Kumaran, M and Wang, S and Eisenbrey, JR and Donuru, A}, title = {Comparison of Gray-scale Inversion to Improve Detection of Pulmonary Nodules on Chest X-rays Between Radiologists and a Deep Convolutional Neural Network.}, journal = {Current problems in diagnostic radiology}, volume = {52}, number = {3}, pages = {180-186}, doi = {10.1067/j.cpradiol.2022.11.004}, pmid = {36470698}, issn = {1535-6302}, mesh = {Humans ; X-Rays ; *Radiography, Thoracic/methods ; Retrospective Studies ; *Multiple Pulmonary Nodules/diagnostic imaging ; Neural Networks, Computer ; Radiologists ; }, abstract = {Detection of pulmonary nodules on chest x-rays is an important task for radiologists. Previous studies have shown improved detection rates using gray-scale inversion. The purpose of our study was to compare the efficacy of gray-scale inversion in improving the detection of pulmonary nodules on chest x-rays for radiologists and machine learning models (ML). We created a mixed dataset consisting of 60, 2-view (posteroanterior view - PA and lateral view) chest x-rays with computed tomography confirmed nodule(s) and 62 normal chest x-rays. Twenty percent of the cases were separated for a testing dataset (24 total images).
Data augmentation through mirroring and transfer learning was used for the remaining cases (784 total images) for supervised training of 4 ML models (grayscale PA, grayscale lateral, gray-scale inversion PA, and gray-scale inversion lateral) on Google's cloud-based AutoML platform. Three cardiothoracic radiologists analyzed the complete 2-view dataset (n=120) and, for comparison to the ML, the single-view testing subsets (12 images each). Gray-scale inversion (area under the curve (AUC) 0.80, 95% confidence interval (CI) 0.75-0.85) did not improve diagnostic performance for radiologists compared to grayscale (AUC 0.84, 95% CI 0.79-0.88). Gray-scale inversion also did not improve diagnostic performance for the ML. In the limited testing dataset, the ML did demonstrate higher sensitivity and negative predictive value for grayscale PA (72.7% and 75.0%), grayscale lateral (63.6% and 66.6%), and gray-scale inversion lateral views (72.7% and 76.9%), comparing favorably to the radiologists (63.9% and 72.3%, 27.8% and 58.3%, 19.5% and 50.5%, respectively). Further investigation of other post-processing algorithms to improve diagnostic performance of ML is warranted.}, } @article {pmid36467434, year = {2022}, author = {Lanjewar, MG and Shaikh, AY and Parab, J}, title = {Cloud-based COVID-19 disease prediction system from X-Ray images using convolutional neural network on smartphone.}, journal = {Multimedia tools and applications}, volume = {}, number = {}, pages = {1-30}, pmid = {36467434}, issn = {1380-7501}, abstract = {COVID-19 has engulfed over 200 nations through human-to-human transmission, either directly or indirectly. Reverse Transcription-polymerase Chain Reaction (RT-PCR) has been endorsed as a standard COVID-19 diagnostic procedure but has caveats such as low sensitivity, the need for a skilled workforce, and is time-consuming. Coronaviruses show significant manifestation in Chest X-Ray (CX-Ray) images and, thus, can be a viable option for an alternate COVID-19 diagnostic strategy. An automatic COVID-19 detection system can be developed to detect the disease, thus reducing strain on the healthcare system. This paper discusses a real-time Convolutional Neural Network (CNN) based system for COVID-19 illness prediction from CX-Ray images on the cloud. The implemented CNN model displays exemplary results, with training accuracy being 99.94% and validation accuracy reaching 98.81%. The confusion matrix was utilized to assess the models' outcome and achieved 99% precision, 98% recall, 99% F1 score, 100% training area under the curve (AUC) and 98.3% validation AUC. The same CX-Ray dataset was also employed to predict the COVID-19 disease with deep Convolution Neural Networks (DCNN), such as ResNet50, VGG19, InceptonV3, and Xception. The prediction outcome demonstrated that the present CNN was more capable than the DCNN models.
The efficient CNN model was deployed to the Platform as a Service (PaaS) cloud.}, } @article {pmid36465713, year = {2023}, author = {Magotra, B and Malhotra, D and Dogra, AK}, title = {Adaptive Computational Solutions to Energy Efficiency in Cloud Computing Environment Using VM Consolidation.}, journal = {Archives of computational methods in engineering : state of the art reviews}, volume = {30}, number = {3}, pages = {1789-1818}, pmid = {36465713}, issn = {1886-1784}, abstract = {Cloud Computing has emerged as a computing paradigm where services are provided through the internet in recent years. Offering on-demand services has transformed the IT companies' working environment, leading to a linearly increasing trend of its usage. The provisioning of the Computing infrastructure is achieved with the help of virtual machines. A great figure of physical devices is required to satisfy the users' resource requirements. To meet the requirements of the submitted workloads that are usually dynamic, the cloud data centers cause the over-provisioning of cloud resources. The result of this over-provisioning is the resource wastage with an increase in the levels of energy consumption, causing a raised operational cost. High CO2 emissions result from this huge energy consumption by data centers, posing a threat to environmental stability. The environmental concern demands for the controlled energy consumption, which can be attained by optimal usage of resources to achieve in the server load, by minimizing the number of active nodes, and by minimizing the frequency of switching between active and de-active server mode in the data center. Motivated by these actualities, we discuss numerous statistical, deterministic, probabilistic, machine learning and optimization based computational solutions for the cloud computing environment. A comparative analysis of the computational methods, on the basis of architecture, consolidation step involved, objectives achieved, simulators involved and resources utilized, has also been presented. A taxonomy for virtual machine (VM) consolidation has also been derived in this research article followed by emerging challenges and research gaps in the field of VM consolidation in cloud computing environment.}, } @article {pmid36465318, year = {2022}, author = {Ilyas, A and Alatawi, MN and Hamid, Y and Mahfooz, S and Zada, I and Gohar, N and Shah, MA}, title = {Software architecture for pervasive critical health monitoring system using fog computing.}, journal = {Journal of cloud computing (Heidelberg, Germany)}, volume = {11}, number = {1}, pages = {84}, pmid = {36465318}, issn = {2192-113X}, abstract = {Because of the existence of Covid-19 and its variants, health monitoring systems have become mandatory, particularly for critical patients such as neonates. However, the massive volume of real-time data generated by monitoring devices necessitates the use of efficient methods and approaches to respond promptly. A fog-based architecture for IoT healthcare systems tends to provide better services, but it also produces some issues that must be addressed. We present a bidirectional approach to improving real-time data transmission for health monitors by minimizing network latency and usage in this paper. To that end, a simplified approach for large-scale IoT health monitoring systems is devised, which provides a solution for IoT device selection of optimal fog nodes to reduce both communication and processing delays. 
Additionally, an improved dynamic approach for load balancing and task assignment is suggested. Embedding the best practices from the IoT, Fog, and Cloud planes, our aim in this work is to offer a software architecture for IoT-based healthcare systems that fulfills non-functional needs. The 4 + 1 view model is used to illustrate the proposed architecture.}, } @article {pmid36462891, year = {2022}, author = {Motwani, A and Shukla, PK and Pawar, M}, title = {Ubiquitous and smart healthcare monitoring frameworks based on machine learning: A comprehensive review.}, journal = {Artificial intelligence in medicine}, volume = {134}, number = {}, pages = {102431}, pmid = {36462891}, issn = {1873-2860}, mesh = {Aged ; Humans ; *COVID-19/epidemiology ; Delivery of Health Care ; Machine Learning ; Pandemics ; }, abstract = {During the COVID-19 pandemic, the patient care delivery paradigm rapidly shifted to remote technological solutions. Rising rates of life expectancy of older people, and deaths due to chronic diseases (CDs) such as cancer, diabetes and respiratory disease pose many challenges to healthcare. While the feasibility of Remote Patient Monitoring (RPM) with a Smart Healthcare Monitoring (SHM) framework was somewhat questionable before the COVID-19 pandemic, it is now a proven commodity and is on its way to becoming ubiquitous. More health organizations are adopting RPM to enable CD management in the absence of individual monitoring. The current studies on SHM have reviewed the applications of IoT and/or Machine Learning (ML) in the domain, their architecture, security, privacy and other network related issues. However, no study has analyzed the AI and ubiquitous computing advances in SHM frameworks. The objective of this research is to identify and map key technical concepts in the SHM framework. In this context an interesting and meaningful classification of the research articles surveyed for this work is presented. The comprehensive and systematic review is based on the "Preferred Reporting Items for Systematic Review and Meta-Analysis" (PRISMA) approach. A total of 2540 papers were screened from leading research archives from 2016 to March 2021, and finally, 50 articles were selected for review. The major advantages, developments, distinctive architectural structure, components, technical challenges and possibilities in SHM are briefly discussed. A review of various recent cloud and fog computing based architectures, major ML implementation challenges, prospects and future trends is also presented. The survey primarily encourages the data driven predictive analytics aspects of healthcare and the development of ML models for health empowerment.}, } @article {pmid36459531, year = {2022}, author = {Truong, L and Ayora, F and D'Orsogna, L and Martinez, P and De Santis, D}, title = {Nanopore sequencing data analysis using Microsoft Azure cloud computing service.}, journal = {PloS one}, volume = {17}, number = {12}, pages = {e0278609}, pmid = {36459531}, issn = {1932-6203}, mesh = {Animals ; Cloud Computing ; *Nanopore Sequencing ; Data Analysis ; Data Accuracy ; *Mammoths ; }, abstract = {Genetic information provides insights into the exome, genome, epigenetics and structural organisation of the organism. Given the enormous amount of genetic information, scientists are able to perform mammoth tasks to improve the standard of health care such as determining genetic influences on the outcome of allogeneic transplantation.
Cloud based computing has increasingly become a key choice for many scientists, engineers and institutions as it offers on-demand network access and users can conveniently rent rather than buy all required computing resources. With the positive advancements of cloud computing and nanopore sequencing data output, we were motivated to develop an automated and scalable analysis pipeline utilizing cloud infrastructure in Microsoft Azure to accelerate HLA genotyping service and improve the efficiency of the workflow at lower cost. In this study, we describe (i) the selection process for suitable virtual machine sizes for computing resources to balance between the best performance versus cost effectiveness; (ii) the building of Docker containers to include all tools in the cloud computational environment; (iii) the comparison of HLA genotype concordance between the in-house manual method and the automated cloud-based pipeline to assess data accuracy. In conclusion, the Microsoft Azure cloud based data analysis pipeline was shown to meet all the key imperatives for performance, cost, usability, simplicity and accuracy. Importantly, the pipeline allows for the on-going maintenance and testing of version changes before implementation. This pipeline is suitable for the data analysis from MinION sequencing platform and could be adopted for other data analysis application processes.}, } @article {pmid36443470, year = {2022}, author = {Jang, H and Koh, H and Gu, W and Kang, B}, title = {Integrative web cloud computing and analytics using MiPair for design-based comparative analysis with paired microbiome data.}, journal = {Scientific reports}, volume = {12}, number = {1}, pages = {20465}, pmid = {36443470}, issn = {2045-2322}, mesh = {Humans ; Cloud Computing ; *Microbiota ; *Gastrointestinal Microbiome ; Mouth ; Skin ; }, abstract = {Pairing (or blocking) is a design technique that is widely used in comparative microbiome studies to efficiently control for the effects of potential confounders (e.g., genetic, environmental, or behavioral factors). Some typical paired (block) designs for human microbiome studies are repeated measures designs that profile each subject's microbiome twice (or more than twice) (1) for pre and post treatments to see the effects of a treatment on microbiome, or (2) for different organs of the body (e.g., gut, mouth, skin) to see the disparity in microbiome between (or across) body sites. Researchers have developed a sheer number of web-based tools for user-friendly microbiome data processing and analytics, though there is no web-based tool currently available for such paired microbiome studies. In this paper, we thus introduce an integrative web-based tool, named MiPair, for design-based comparative analysis with paired microbiome data. MiPair is a user-friendly web cloud service that is built with step-by-step data processing and analytic procedures for comparative analysis between (or across) groups or between baseline and other groups. MiPair employs parametric and non-parametric tests for complete or incomplete block designs to perform comparative analyses with respect to microbial ecology (alpha- and beta-diversity) and taxonomy (e.g., phylum, class, order, family, genus, species). We demonstrate its usage through an example clinical trial on the effects of antibiotics on gut microbiome. 
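For intuition, the paired block-design comparisons that tools like MiPair automate come down to tests such as a Wilcoxon signed-rank on pre- versus post-treatment alpha diversity; the Shannon-index values below are fabricated purely for illustration:

```python
# Paired non-parametric test on fabricated pre/post alpha-diversity values.
import numpy as np
from scipy.stats import wilcoxon

pre  = np.array([2.91, 3.05, 2.77, 3.22, 2.64, 3.10])  # Shannon index, baseline
post = np.array([2.55, 2.80, 2.60, 3.01, 2.40, 2.95])  # same subjects, post-antibiotics

stat, p = wilcoxon(pre, post)  # paired, non-parametric
print(f"Wilcoxon W={stat:.1f}, p={p:.3f}")
```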
MiPair is open-source software that can be run on our web server (http://mipair.micloud.kr) or on a user's computer (https://github.com/yj7599/mipairgit).}, } @article {pmid36439763, year = {2022}, author = {Fouotsa Manfouo, NC and Von Fintel, D}, title = {Investigating the effects of drought and lockdowns on smallholder and commercial agricultural production in KwaZulu-Natal using remotely sensed data.}, journal = {Heliyon}, volume = {8}, number = {11}, pages = {e11637}, pmid = {36439763}, issn = {2405-8440}, abstract = {Not many efforts have been made so far to understand the effects of both the 2015-2016 drought and the 2020 lockdown measures on the agricultural production of smallholder vis-a-vis commercial farmers in Kwazulu-Natal. Google Earth Engine and a random forest algorithm are used to generate a dataset that helps to investigate this question. A regression is performed on double differenced data to investigate the effects of interest. A k-means cluster analysis is also used to determine whether the distribution patterns of crop production changed with drought and disruption of agricultural production input. Results show that: (1) droughts affected the agricultural production of both areas similarly. Crop cover declined in both areas for one season after droughts were broken. Then recovery was driven by greener, more productive crops rather than the expansion of crop area. (2) The response of both areas to the COVID-19 lockdown was also similar. Both smallholder and commercial areas' Normalised Difference Vegetation Index - a proxy for crop vitality - improved in response to regulations favourable to the sector and improved rainfall. No significant adjustments in crop cover were observed. Production therefore changed primarily at the intensive margin (improved productivity of existing croplands) rather than the extensive (changing the extent of land under cultivation). (3) Cluster analysis allows for a more granular view, showing that the positive impact of lockdowns on agriculture was concentrated in areas with high rainfall and close proximity to metropolitan markets. Both smallholder and commercial farmers are therefore reliant on market access together with favourable environmental conditions for improved production.}, } @article {pmid36438442, year = {2022}, author = {Alzoubi, YI and Gill, A and Mishra, A}, title = {A systematic review of the purposes of Blockchain and fog computing integration: classification and open issues.}, journal = {Journal of cloud computing (Heidelberg, Germany)}, volume = {11}, number = {1}, pages = {80}, pmid = {36438442}, issn = {2192-113X}, abstract = {The fog computing concept was proposed to help cloud computing for the data processing of Internet of Things (IoT) applications. However, fog computing faces several challenges such as security, privacy, and storage. One way to address these challenges is to integrate blockchain with fog computing. There are several applications of blockchain-fog computing integration that have been proposed, recently, due to their lucrative benefits such as enhancing security and privacy. There is a need to systematically review and synthesize the literature on this topic of blockchain-fog computing integration. The purposes of integrating blockchain and fog computing were determined using a systematic literature review approach and tailored search criteria established from the research questions. In this research, 181 relevant papers were found and reviewed.
The results showed that the authors proposed the combination of blockchain and fog computing for several purposes such as security, privacy, access control, and trust management. A lack of standards and laws may make it difficult for blockchain and fog computing to be integrated in the future, particularly in light of newly developed technologies like quantum computing and artificial intelligence. The findings of this paper serve as a resource for researchers and practitioners of blockchain-fog computing integration for future research and designs.}, } @article {pmid36433599, year = {2022}, author = {Trakadas, P and Masip-Bruin, X and Facca, FM and Spantideas, ST and Giannopoulos, AE and Kapsalis, NC and Martins, R and Bosani, E and Ramon, J and Prats, RG and Ntroulias, G and Lyridis, DV}, title = {A Reference Architecture for Cloud-Edge Meta-Operating Systems Enabling Cross-Domain, Data-Intensive, ML-Assisted Applications: Architectural Overview and Key Concepts.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {22}, pages = {}, pmid = {36433599}, issn = {1424-8220}, support = {PID2021-124463OB-100//Spanish Ministry of Science, Innovation and Universities and FEDER/ ; }, mesh = {*Ecosystem ; *Software ; }, abstract = {Future data-intensive intelligent applications are required to traverse across the cloud-to-edge-to-IoT continuum, where cloud and edge resources elegantly coordinate, alongside sensor networks and data. However, current technical solutions can only partially handle the data outburst associated with the IoT proliferation experienced in recent years, mainly due to their hierarchical architectures. In this context, this paper presents a reference architecture of a meta-operating system (RAMOS), targeted to enable a dynamic, distributed and trusted continuum which will be capable of facilitating the next-generation smart applications at the edge. RAMOS is domain-agnostic, capable of supporting heterogeneous devices in various network environments. Furthermore, the proposed architecture possesses the ability to place the data at the origin in a secure and trusted manner. Based on a layered structure, the building blocks of RAMOS are thoroughly described, and the interconnection and coordination between them are fully presented. Furthermore, five practical scenarios illustrate how the proposed reference architecture and its characteristics could fit potential key industrial and societal applications, which in the future will require more power at the edge, focusing on the distributed intelligence and privacy preservation principles promoted by RAMOS, as well as the concept of environmental footprint minimization.
Finally, the business potential of an open edge ecosystem and the societal impacts of climate net neutrality are also illustrated.}, } @article {pmid36433575, year = {2022}, author = {Bin Mofidul, R and Alam, MM and Rahman, MH and Jang, YM}, title = {Real-Time Energy Data Acquisition, Anomaly Detection, and Monitoring System: Implementation of a Secured, Robust, and Integrated Global IIoT Infrastructure with Edge and Cloud AI.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {22}, pages = {}, pmid = {36433575}, issn = {1424-8220}, mesh = {*Internet of Things ; Artificial Intelligence ; Reproducibility of Results ; Computers ; Electrocardiography ; }, abstract = {The industrial internet of things (IIoT), a leading technology to digitize industrial sectors and applications, requires the integration of edge and cloud computing, cyber security, and artificial intelligence to enhance its efficiency, reliability, and sustainability. However, the collection of heterogeneous data from individual sensors as well as monitoring and managing large databases with sufficient security has become a concerning issue for the IIoT framework. The development of a smart and integrated IIoT infrastructure can be a possible solution that can efficiently handle the aforementioned issues. This paper proposes an AI-integrated, secured IIoT infrastructure incorporating heterogeneous data collection and storing capability, global inter-communication, and a real-time anomaly detection model. To this end, smart data acquisition devices are designed and developed through which energy data are transferred to the edge IIoT servers. Hash encoding credentials and transport layer security protocol are applied to the servers. Furthermore, these servers can exchange data through a secured message queuing telemetry transport protocol. Edge and cloud databases are exploited to handle big data. For detecting the anomalies of individual electrical appliances in real-time, an algorithm based on a group of isolation forest models is developed and implemented on edge and cloud servers as well. In addition, remote-accessible online dashboards are implemented, enabling users to monitor the system. Overall, this study covers hardware design; the development of open-source IIoT servers and databases; the implementation of an interconnected global networking system; the deployment of edge and cloud artificial intelligence; and the development of real-time monitoring dashboards. The necessary performance results are measured and thoroughly investigated, demonstrating the feasibility of the proposed IIoT framework.}, } @article {pmid36433564, year = {2022}, author = {Umoren, O and Singh, R and Awan, S and Pervez, Z and Dahal, K}, title = {Blockchain-Based Secure Authentication with Improved Performance for Fog Computing.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {22}, pages = {}, pmid = {36433564}, issn = {1424-8220}, mesh = {*Blockchain ; Computer Security ; Cloud Computing ; *Internet of Things ; Algorithms ; }, abstract = {Advancement in the Internet of Things (IoT) and cloud computing has escalated the number of connected edge devices in a smart city environment. Having billions more devices has contributed to security concerns, and an attack-proof authentication mechanism is urgently needed to sustain the IoT environment. Securing all devices would be a huge task requiring substantial computational power, and can be a bottleneck for devices with fewer computational resources.
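The per-appliance anomaly detection described in the IIoT abstract above (pmid36433575) rests on a group of isolation forest models. The sketch below, with invented wattage data and parameter choices, shows the general pattern of training one scikit-learn IsolationForest per appliance and scoring new readings; it is an assumption-laden illustration, not the authors' implementation.

```python
# One IsolationForest per appliance, trained on that appliance's own
# consumption history (toy data); readings scored as +1 normal / -1 anomaly.
import numpy as np
from sklearn.ensemble import IsolationForest

rng = np.random.default_rng(0)
history = {"heater": rng.normal(1500, 50, (500, 1)),   # watts, synthetic
           "fridge": rng.normal(120, 10, (500, 1))}

models = {name: IsolationForest(n_estimators=100, contamination=0.01,
                                random_state=0).fit(data)
          for name, data in history.items()}

new_reading = np.array([[2100.0]])             # suspicious heater spike
label = models["heater"].predict(new_reading)  # +1 normal, -1 anomaly
print("anomaly" if label[0] == -1 else "normal")
```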
To improve the authentication mechanism, many researchers have proposed decentralized applications such as blockchain technology for securing fog and IoT environments. Ethereum is considered a popular blockchain platform and is used by researchers to implement the authentication mechanism due to its programmable smart contracts. In this research, we propose a secure authentication mechanism with improved performance. The Neo blockchain is a platform whose properties can provide improved security and faster execution. The research utilizes the intrinsic properties of the Neo blockchain to develop a secure authentication mechanism. The proposed authentication mechanism is compared with existing algorithms; it is 20 to 90 per cent faster in execution time and achieves a 30 to 70 per cent decrease in registration and authentication time compared to existing methods.}, } @article {pmid36433381, year = {2022}, author = {Yang, J and Lee, TY and Lee, WT and Xu, L}, title = {A Design and Application of Municipal Service Platform Based on Cloud-Edge Collaboration for Smart Cities.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {22}, pages = {}, pmid = {36433381}, issn = {1424-8220}, support = {ZZ2021J23//the Zhangzhou Municipal Natural Science Foundation/ ; 2020J01813//the Fujian Province Nature Science Foundation/ ; NSCL-KF2021-07//the Opening Foundation of Fujian Provincial Key Laboratory of Network Security and Cryptology Research Fund, Fujian Normal University/ ; FBJG20210070//the Research Project on Education and Teaching Reform of Undergraduate Colleges and Universities in Fujian Province/ ; }, mesh = {Cities ; *Artificial Intelligence ; *Cloud Computing ; Computers ; Game Theory ; }, abstract = {Information and Communication Technology (ICT) makes cities "smart", capable of providing advanced municipal services to citizens more efficiently. In the literature, many applications of municipal service platforms based on cloud computing and edge computing have been proposed, but reference models and application instances based on cloud-edge collaboration specifically for municipal service platforms are rarely studied. In this context, this paper first develops a reference model, including resource collaboration, application collaboration, service collaboration, and security collaboration, and discusses the main contents and challenges of each part. Then, aiming at the problem of computing and communication resource allocation in the cloud-edge collaboration, a game-theory-based dynamic resource allocation model is introduced. Finally, an e-government self-service system based on the cloud-edge collaboration is designed and implemented. The cloud side is a cloud computing server, and the edge side comprises self-service terminals integrating various edge computing devices with embedded Artificial Intelligence (AI). The experimental results show that the designed system combines the advantages of cloud computing and edge computing, and provides a better user experience with lower processing latency, larger bandwidth, and more concurrent tasks.
Meanwhile, the findings show that the evolutionary equilibrium and the Nash equilibrium are the optimal solutions, respectively.}, } @article {pmid36433374, year = {2022}, author = {Mir, TS and Liaqat, HB and Kiren, T and Sana, MU and Alvarez, RM and Miró, Y and Pascual Barrera, AE and Ashraf, I}, title = {Antifragile and Resilient Geographical Information System Service Delivery in Fog Computing.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {22}, pages = {}, pmid = {36433374}, issn = {1424-8220}, support = {N/A//European University of the Atlantic/ ; }, mesh = {*Geographic Information Systems ; *Cloud Computing ; }, abstract = {The demand for cloud computing has drastically increased recently, but this paradigm has several issues due to its inherent complications, such as unreliability, latency, limited mobility support, and lack of location-aware services. Fog computing can resolve these issues to some extent, yet it is still in its infancy. Several works exist, but they lack fault tolerance in fog computing, which necessitates further research. Fault tolerance enables the performing and provisioning of services despite failures and maintains anti-fragility and resiliency. Fog computing is highly diverse in terms of failures as compared to cloud computing and requires wide research and investigation. From this perspective, this study primarily focuses on the provision of uninterrupted services through fog computing. A framework has been designed to provide uninterrupted services while maintaining resiliency. Geographical information system (GIS) services have been deployed as a test bed, which requires high computation, intensive CPU and memory resources, and low latency. Keeping in mind different types of failures at different levels and their impacts on service failure and response time, the framework was made anti-fragile and resilient at different levels. Experimental results indicate that during service interruption, the user state remains unaffected.}, } @article {pmid36433242, year = {2022}, author = {Daraghmi, YA and Daraghmi, EY and Daraghma, R and Fouchal, H and Ayaida, M}, title = {Edge-Fog-Cloud Computing Hierarchy for Improving Performance and Security of NB-IoT-Based Health Monitoring Systems.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {22}, pages = {}, pmid = {36433242}, issn = {1424-8220}, support = {Palestine Technical University - Kadoorie//French Ministry for Europe and Foreign Affairs (MEAE), the French Ministry for Higher Education, Research and Innovation (MESRI), and by the Consulate General of France in Jerusalem./ ; }, mesh = {*Cloud Computing ; *Electrocardiography ; Algorithms ; Support Vector Machine ; }, abstract = {This paper proposes a three-computing-layer architecture consisting of Edge, Fog, and Cloud for remote health vital signs monitoring. The novelty of this architecture is in using the Narrow-Band IoT (NB-IoT) for communicating with a large number of devices and covering large areas with minimum power consumption. Additionally, the architecture reduces the communication delay as the edge layer serves the health terminal devices with initial decisions and prioritizes data transmission for minimizing congestion on base stations. The paper also investigates different authentication protocols for improving security while maintaining low computation and transmission time.
For data analysis, different machine learning algorithms, such as decision tree, support vector machines, and logistic regression, are used on the three layers. The proposed architecture is evaluated using CloudSim, iFogSim, and ns3-NB-IoT on real data consisting of medical vital signs. The results show that the proposed architecture reduces the NB-IoT delay by 59.9%, the execution time by an average of 38.5%, and the authentication time by 35.1% for a large number of devices. This paper concludes that the NB-IoT combined with edge, fog, and cloud computing can support efficient remote health monitoring for a large number of devices and large areas.}, } @article {pmid36430048, year = {2022}, author = {Zhao, Z and Wang, Z and Garcia-Campayo, J and Perez, HM}, title = {The Dissemination Strategy of an Urban Smart Medical Tourism Image by Big Data Analysis Technology.}, journal = {International journal of environmental research and public health}, volume = {19}, number = {22}, pages = {}, pmid = {36430048}, issn = {1660-4601}, mesh = {Humans ; *Medical Tourism ; Tourism ; Data Analysis ; Big Data ; Reproducibility of Results ; Technology ; }, abstract = {The advanced level of medical care is closely related to the development and popularity of a city, and it will also drive the development of tourism. The smart urban medical system based on big data analysis technology can greatly facilitate people's lives and increase the flow of people in the city, which is of great significance to the city's tourism image dissemination and branding. The medical system, with eight layers of architecture including access, medical cloud service governance, the medical cloud service resource, the platform's public service, the platform's runtime service, infrastructure, and the overall security and monitoring system of the platform, is designed based on big data analysis technology. Chengdu is taken as an example to position the dissemination of an urban tourism image based on big data analysis technology. Quantitative analysis and questionnaire methods are used to study the effect of urban smart medical system measurement and tourism image communication positioning based on big data analysis technology. The results show that the smart medical cloud service platform of the urban smart medical system, as a public information service system, supports users in obtaining medical services through various terminal devices without geographical restrictions. The smart medical cloud realizes service aggregation and data sharing compared to the traditional isolated medical service system. Cloud computing is used as the technical basis, bringing unprecedented improvements in the scalability and reliability of the system. This paper discusses how to effectively absorb, understand, and use tools in the big data environment, extract information from data, find effective information, make image communication activities accurate, reduce the cost, and improve the efficiency of city image communication. The research shows that big data analysis technology improves patients' medical experience, improves medical efficiency, and eases the strain on urban medical resource allocation to a certain extent. This technology improves people's satisfaction with the dissemination of urban tourism images, makes urban tourism image dissemination activities accurate, reduces the cost of urban tourism image dissemination, and improves the efficiency of urban tourism image dissemination.
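To make the three-layer data analysis of the NB-IoT paper above concrete, here is a hedged sketch, on synthetic stand-in data, of fitting the three classifier families the abstract names (decision tree, support vector machine, logistic regression) with scikit-learn; the real system trains on medical vital signs.

```python
# Fit the three classifier families named in the abstract on synthetic data.
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier

X, y = make_classification(n_samples=1000, n_features=6, random_state=0)
X_tr, X_te, y_tr, y_te = train_test_split(X, y, random_state=0)

for name, clf in [("decision tree", DecisionTreeClassifier(max_depth=5)),
                  ("SVM", SVC(kernel="rbf")),
                  ("logistic regression", LogisticRegression(max_iter=1000))]:
    clf.fit(X_tr, y_tr)  # stand-in for training on vital-sign features
    print(f"{name}: test accuracy = {clf.score(X_te, y_te):.3f}")
```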
The combination of the two can provide a reference for developing urban smart medical care and disseminating a tourism image.}, } @article {pmid36429833, year = {2022}, author = {Li, H and Ou, D and Ji, Y}, title = {An Environmentally Sustainable Software-Defined Networking Data Dissemination Method for Mixed Traffic Flows in RSU Clouds with Energy Restriction.}, journal = {International journal of environmental research and public health}, volume = {19}, number = {22}, pages = {}, pmid = {36429833}, issn = {1660-4601}, mesh = {*Computer Communication Networks ; *Software ; Programming, Linear ; Algorithms ; Physical Phenomena ; }, abstract = {The connected multi-road-side-unit (RSU) environment can be envisioned as an RSU cloud. In this paper, the Software-Defined Networking (SDN) framework is utilized to dynamically reconfigure the RSU clouds for the mixed traffic flows with energy restrictions, which are composed of five categories of vehicles with distinctive communication demands. An environmentally sustainable SDN data dissemination method for safer and greener transportation solutions is thus proposed, aiming to achieve the lowest overall SDN cloud delay with the fewest working hosts and minimum energy consumption, which is a mixed integer linear programming (MILP) problem. To solve the problem, Joint optimization algorithms with Finite resources (JF) in three hyperparameter versions, JF (DW = 0.3, HW = 0.7), JF (DW = 0.5, HW = 0.5) and JF (DW = 0.7, HW = 0.3), were proposed, which are in contrast with single-objective optimization algorithms, the Host Optimization (H) algorithm, and the Delay optimization (D) algorithm. Results show that JF (DW = 0.3, HW = 0.7) and JF (DW = 0.5, HW = 0.5), when compared with the D algorithm, usually had slightly larger cloud delays, but fewer working hosts and lower energy consumption, which has vital significance for enhancing energy efficiency and environmental protection, and shows the superiority of JFs over the D algorithm. Meanwhile, the H algorithm had the fewest working hosts and the lowest energy consumption under the same conditions, but completely ignored the explosive surge of delay, which is not desirable for most cases of the SDN RSU cloud. Further analysis showed that the larger the network topology of the SDN cloud, the harder it was to find a feasible network configuration. Therefore, when designing an environmentally sustainable SDN RSU cloud for the greener future mobility of intelligent transportation systems, its size should be limited or partitioned into a relatively small topology.}, } @article {pmid36417024, year = {2023}, author = {Cohen, RY and Sodickson, AD}, title = {An Orchestration Platform that Puts Radiologists in the Driver's Seat of AI Innovation: a Methodological Approach.}, journal = {Journal of digital imaging}, volume = {36}, number = {2}, pages = {700-714}, pmid = {36417024}, issn = {1618-727X}, mesh = {Humans ; *Artificial Intelligence ; Radiologists ; *Radiology/methods ; Machine Learning ; Diagnostic Imaging ; }, abstract = {Current AI-driven research in radiology requires resources and expertise that are often inaccessible to small and resource-limited labs. The clinicians who are able to participate in AI research are frequently well-funded, well-staffed, and either have significant experience with AI and computing, or have access to colleagues or facilities that do.
Current imaging data is clinician-oriented and is not easily amenable to machine learning initiatives, resulting in inefficient, time-consuming, and costly efforts that rely upon a crew of data engineers and machine learning scientists, and all too often preclude radiologists from driving AI research and innovation. We present the system and methodology we have developed to address infrastructure and platform needs, while reducing the staffing and resource barriers to entry. We emphasize a data-first and modular approach that streamlines the AI development and deployment process while providing efficient and familiar interfaces for radiologists, such that they can be the drivers of new AI innovations.}, } @article {pmid36415683, year = {2022}, author = {Xie, Y and Li, P and Nedjah, N and Gupta, BB and Taniar, D and Zhang, J}, title = {Privacy protection framework for face recognition in edge-based Internet of Things.}, journal = {Cluster computing}, volume = {}, number = {}, pages = {1-19}, pmid = {36415683}, issn = {1386-7857}, abstract = {Edge computing (EC) relieves Internet of Things (IoT)-based face recognition systems of the constraints imposed by the limited storage and computing resources of local or mobile terminals. However, data privacy leakage remains a concern. Previous studies only focused on some stages of face data processing, while this study focuses on the privacy protection of face data throughout its entire life cycle. Therefore, we propose a general privacy protection framework for edge-based face recognition (EFR) systems. To protect the privacy of face images and training models transmitted between edges and the remote cloud, we design a local differential privacy (LDP) algorithm based on the proportion difference of feature information. In addition, we also introduce identity authentication and hash technology to ensure the legitimacy of the terminal device and the integrity of the face image in the data acquisition phase. Theoretical analysis proves the rationality and feasibility of the scheme. Compared with the non-privacy protection situation and the equal privacy budget allocation method, our method achieves the best balance between availability and privacy protection in the numerical experiment.}, } @article {pmid36410105, year = {2023}, author = {Aguilar, B and Abdilleh, K and Acquaah-Mensah, GK}, title = {Multi-omics inference of differential breast cancer-related transcriptional regulatory network gene hubs between young Black and White patients.}, journal = {Cancer genetics}, volume = {270-271}, number = {}, pages = {1-11}, doi = {10.1016/j.cancergen.2022.11.001}, pmid = {36410105}, issn = {2210-7762}, mesh = {Humans ; Female ; Adult ; *Breast Neoplasms/genetics/metabolism ; Multiomics ; White ; Oncogenes ; *MicroRNAs/genetics ; Tumor Suppressor Proteins/genetics ; Ubiquitin-Protein Ligases/genetics ; }, abstract = {OBJECTIVE: Breast cancers (BrCA) are a leading cause of illness and mortality worldwide. Black women have a higher incidence rate relative to white women prior to age 40 years, and a lower incidence rate after 50 years. The objective of this study is to identify -omics differences between the two breast cancer cohorts to better understand the disparities observed in patient outcomes.
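The LDP step of the EFR framework above (pmid36415683) can be pictured with a generic Laplace mechanism; the paper's own algorithm allocates the privacy budget by the proportion difference of feature information, for which the uniform per-feature budget below is merely a stand-in.

```python
# Generic epsilon-LDP perturbation of a face-feature vector (illustrative
# stand-in for the paper's proportion-difference budget allocation).
import numpy as np

def ldp_perturb(features, epsilon, sensitivity=1.0):
    """Add Laplace(sensitivity/epsilon) noise to each feature before upload."""
    scale = sensitivity / epsilon
    noise = np.random.default_rng(0).laplace(0.0, scale, features.shape)
    return features + noise

embedding = np.random.default_rng(1).normal(size=128)  # toy face embedding
private = ldp_perturb(embedding, epsilon=2.0)
print(float(np.abs(private - embedding).mean()))       # average distortion
```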

MATERIALS AND METHODS: Using Standard SQL, we queried ISB-CGC-hosted Google BigQuery tables storing TCGA BrCA gene expression, methylation, and somatic mutation data and analyzed the combined multi-omics results using a variety of methods.
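A sketch of the kind of Standard SQL query this step describes, using the google-cloud-bigquery client; the table path and column names follow public ISB-CGC conventions but are assumptions to be checked against the current table versions.

```python
# Query an ISB-CGC-hosted TCGA BrCA somatic mutation table (assumed path).
from google.cloud import bigquery

client = bigquery.Client()  # requires GCP credentials with BigQuery access

query = """
SELECT Hugo_Symbol, COUNT(DISTINCT case_barcode) AS n_mutated_cases
FROM `isb-cgc-bq.TCGA.somatic_mutation_hg38_gdc_current`  -- assumed table
WHERE project_short_name = 'TCGA-BRCA'
GROUP BY Hugo_Symbol
ORDER BY n_mutated_cases DESC
LIMIT 20
"""

for row in client.query(query).result():
    print(row.Hugo_Symbol, row.n_mutated_cases)
```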

RESULTS: Among Stage II patients 50 years or younger, genes PIK3CA and CDH1 are more frequently mutated in White (W50) than in Black or African American patients (BAA50), while HUWE1, HYDIN, and FBXW7 mutations are more frequent in BAA50. Over-representation analysis (ORA) and Gene Set Enrichment Analysis (GSEA) results indicate that, among others, the Reactome Signaling by ROBO Receptors gene set is enriched in BAA50. Using the Virtual Inference of Protein-activity by Enriched Regulon analysis (VIPER) algorithm, the putative top 20 master regulators identified include NUPR1, NFKBIL1, ZBTB17, TEAD1, EP300, TRAF6, CACTIN, and MID2. CACTIN and MID2 are of prognostic value. We identified driver genes, such as OTUB1, with suppressed expression whose DNA methylation status was inversely correlated with gene expression. Networks capturing microRNA and gene expression correlations identified notable microRNA hubs, such as miR-93 and miR-92a-2, expressed at higher levels in BAA50 than in W50.
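A worked illustration (with invented counts, not the study's data) of how a cohort difference in mutation frequency such as the PIK3CA result can be tested, using Fisher's exact test on a 2x2 table of mutated versus wild-type cases:

```python
# Fisher's exact test on hypothetical mutated/wild-type counts per cohort.
from scipy.stats import fisher_exact

w50_counts   = [30, 70]   # [mutated, wild-type], hypothetical W50 cohort
baa50_counts = [12, 88]   # [mutated, wild-type], hypothetical BAA50 cohort

odds_ratio, p_value = fisher_exact([w50_counts, baa50_counts])
print(f"odds ratio = {odds_ratio:.2f}, p = {p_value:.4f}")
```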

DISCUSSION/CONCLUSION: The results point to several driver genes as being involved in the observed differences between the cohorts. The findings here form the basis for further mechanistic exploration.}, } @article {pmid36408731, year = {2023}, author = {Kucewicz, MT and Worrell, GA and Axmacher, N}, title = {Direct electrical brain stimulation of human memory: lessons learnt and future perspectives.}, journal = {Brain : a journal of neurology}, volume = {146}, number = {6}, pages = {2214-2226}, doi = {10.1093/brain/awac435}, pmid = {36408731}, issn = {1460-2156}, mesh = {Humans ; *Brain/physiology ; *Memory/physiology ; Mental Recall/physiology ; Electric Stimulation ; Cognition ; }, abstract = {Modulation of cognitive functions supporting human declarative memory is one of the grand challenges of neuroscience, and of vast importance for a variety of neuropsychiatric, neurodegenerative and neurodevelopmental diseases. Despite a recent surge of successful attempts at improving performance in a range of memory tasks, the optimal approaches and parameters for memory enhancement have yet to be determined. On a more fundamental level, it remains elusive as to how delivering electrical current in a given brain area leads to enhanced memory processing. Starting from the local and distal physiological effects on neural populations, the mechanisms of enhanced memory encoding, maintenance, consolidation or recall in response to direct electrical stimulation are only now being unravelled. With the advent of innovative neurotechnologies for concurrent recording and stimulation intracranially in the human brain, it becomes possible to study both acute and chronic effects of stimulation on memory performance and the underlying neural activities. In this review, we summarize the effects of various invasive stimulation approaches for modulating memory functions. We first outline the challenges that were faced in the initial studies of memory enhancement and the lessons learnt. Electrophysiological biomarkers are then reviewed as more objective measures of the stimulation effects than behavioural outcomes. Finally, we classify the various stimulation approaches into continuous and phasic modulation with an open or closed loop for responsive stimulation based on analysis of the recorded neural activities. Although the potential advantage of closed-loop responsive stimulation over the classic open-loop approaches is inconclusive, we foresee the emerging results from ongoing longitudinal studies and clinical trials will shed light on both the mechanisms and optimal strategies for improving declarative memory. Adaptive stimulation based on the biomarker analysis over extended periods of time is proposed as a future direction for obtaining lasting effects on memory functions. Chronic tracking and modulation of neural activities intracranially through adaptive stimulation opens tantalizing new avenues to continually monitor and treat memory and cognitive deficits in a range of brain disorders. Brain co-processors created with machine-learning tools and wireless bi-directional connectivity to seamlessly integrate implanted devices with smartphones and cloud computing are poised to enable real-time automated analysis of large data volumes and adaptively tune electrical stimulation based on electrophysiological biomarkers of behavioural states. 
Next-generation implantable devices for high-density recording and stimulation of electrophysiological activities, and technologies for distributed brain-computer interfaces are presented as selected future perspectives for modulating human memory and associated mental processes.}, } @article {pmid36408485, year = {2022}, author = {Al-Khafaji, HMR and Jaleel, RA}, title = {Adopting effective hierarchal IoMTs computing with K-efficient clustering to control and forecast COVID-19 cases.}, journal = {Computers & electrical engineering : an international journal}, volume = {104}, number = {}, pages = {108472}, pmid = {36408485}, issn = {0045-7906}, abstract = {The Internet of Medical Things (IoMTs) based on fog/cloud computing has been effectively proven to improve the control, monitoring, and care quality of Coronavirus disease 2019 (COVID-19) patients. One of the convenient approaches to assess symptomatic patients is to group patients with comparable symptoms and provide an overview of the required level of care to patients with similar conditions. Therefore, this study adopts effective hierarchical IoMTs computing with K-Efficient clustering to control and forecast COVID-19 cases. The proposed system integrates the K-Means and K-Medoids clustering algorithms to monitor the health status of patients, detect COVID-19 cases early, and process data in real time with ultra-low latency. In addition, the data analysis takes into account the primary requirements of the network to assist in understanding the nature of COVID-19. Based on the findings, the K-Efficient clustering with fog computing is a more effective approach to analyse the status of patients compared to K-Means and K-Medoids in terms of intra-class and inter-class measures, running time, network latency, and RAM consumption. In summary, the outcome of this study provides a novel approach for remote monitoring and handling of infected COVID-19 patients through real-time personalised treatment services.}, } @article {pmid36404909, year = {2022}, author = {Narasimha Raju, AS and Jayavel, K and Rajalakshmi, T}, title = {ColoRectalCADx: Expeditious Recognition of Colorectal Cancer with Integrated Convolutional Neural Networks and Visual Explanations Using Mixed Dataset Evidence.}, journal = {Computational and mathematical methods in medicine}, volume = {2022}, number = {}, pages = {8723957}, pmid = {36404909}, issn = {1748-6718}, mesh = {Humans ; Neural Networks, Computer ; Support Vector Machine ; Diagnosis, Computer-Assisted/methods ; Colonoscopy ; *Polyps ; *Colorectal Neoplasms/diagnostic imaging ; }, abstract = {Colorectal cancer typically affects the gastrointestinal tract within the human body. Colonoscopy is one of the most accurate methods of detecting cancer. Current computer-assisted diagnosis (CADx) systems facilitate the identification of cancer with a limited number of deep learning methods and do not consider mixed datasets. The proposed system, called ColoRectalCADx, is supported by deep learning (DL) models suitable for cancer research. The CADx system comprises five stages: convolutional neural networks (CNN), support vector machine (SVM), long short-term memory (LSTM), visual explanation such as gradient-weighted class activation mapping (Grad-CAM), and semantic segmentation phases.
Here, the key components of the CADx system are equipped with 9 individual and 12 integrated CNNs, meaning that the investigational experiments cover a total of 21 CNNs. In the subsequent phase, the CADx system combines concatenated transfer-learning features from the CNNs with SVM classification. Additional classification is applied to ensure effective transfer of results from CNN to LSTM. The system's input is a mixed dataset combining CVC Clinic DB, Kvasir2, and Hyper Kvasir. After the CNN and LSTM stages, malignancies are detected using improved polyp recognition with Grad-CAM and semantic segmentation using U-Net. CADx results have been stored on Google Cloud for record retention. In these experiments, among all the CNNs, the individual CNN DenseNet-201 (87.1% training and 84.7% testing accuracies) and the integrated CNN ADaDR-22 (84.61% training and 82.17% testing accuracies) were the most efficient for cancer detection with the CNN+LSTM model. ColoRectalCADx accurately identifies cancer through the individual CNN DenseNet-201 and the integrated CNN ADaDR-22. In Grad-CAM's visual explanations, CNN DenseNet-201 displays precise visualization of polyps, and U-Net provides precise segmentation of malignant polyps.}, } @article {pmid36395912, year = {2023}, author = {Xu, H and Yang, X and Wang, D and Hu, Y and Cheng, Z and Shi, Y and Zheng, P and Shi, L}, title = {Multivariate and spatio-temporal groundwater pollution risk assessment: A new long-time serial groundwater environmental impact assessment system.}, journal = {Environmental pollution (Barking, Essex : 1987)}, volume = {317}, number = {}, pages = {120621}, doi = {10.1016/j.envpol.2022.120621}, pmid = {36395912}, issn = {1873-6424}, mesh = {*Environmental Monitoring/methods ; *Groundwater ; Environmental Pollution ; Soil ; Risk Assessment/methods ; }, abstract = {Groundwater pollution risk assessment is an important part of environmental assessment. Although it has been developed for many years, there has not yet been a multi-dimensional method that takes into account long time series and spatial factors. We propose a new method that combines the advantages of remote sensing cloud computing, long-term groundwater modeling simulation, and GIS technology to address this efficiently. A coastal industrial park in Hainan was used as the study area. More than 10 parameters, including the depth of the groundwater level, rainfall, topography and geomorphology, soil moisture, pollution source, and pollution toxicity, were used as indexes. A comprehensive model combining remote sensing cloud computing, the DRASTIC model, and MODFLOW + MT3DMS was established to assess the pollution risk from 2014 to 2021. The multi-year results indicated that groundwater pollution risk was usually distributed perpendicular to the coastline, and the risk increased from inland areas toward the coast. With the discharge of pollutants in the industrial park, the pollution risk in the area 5 km away from the centre increased year by year until it became stable in 2019, and the risk in the centre of the park reached level 1, covering an area of up to 145,400 square metres and accounting for 0.012% of the whole study area. The assessment results in 2020 and 2021 fluctuated slightly compared with those in 2019.
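The DRASTIC part of the groundwater model above is, at its core, a weighted sum of hydrogeologic ratings. The sketch below assumes the classic seven-parameter DRASTIC weights; the study extends the index with additional parameters such as pollution source and toxicity.

```python
# Classic DRASTIC vulnerability index: weighted sum of seven ratings.
DRASTIC_WEIGHTS = {
    "Depth_to_water": 5, "net_Recharge": 4, "Aquifer_media": 3,
    "Soil_media": 2, "Topography": 1, "Impact_of_vadose_zone": 5,
    "hydraulic_Conductivity": 3,
}

def drastic_index(ratings):
    """ratings: parameter -> rating (1-10 scale) for one grid cell."""
    return sum(DRASTIC_WEIGHTS[p] * r for p, r in ratings.items())

cell = {"Depth_to_water": 9, "net_Recharge": 6, "Aquifer_media": 8,
        "Soil_media": 5, "Topography": 10, "Impact_of_vadose_zone": 8,
        "hydraulic_Conductivity": 6}    # hypothetical coastal grid cell
print(drastic_index(cell))              # higher index = higher pollution risk
```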
Therefore, in terms of groundwater resource protection and resource management, it is necessary to focus on the detection of pollution in the coastal zone and the pollution within 5 km of the centre to strictly control pollution discharge. In this study, the comprehensive assessment includes surface indicators, subsurface indicators, and pollutant indicators. Finally, we achieve a multivariate, spatial and long time series groundwater pollution risk assessment system, which is a new groundwater environmental impact assessment (GEIA) system.}, } @article {pmid36395210, year = {2022}, author = {Datta, S and Chakraborty, W and Radosavljevic, M}, title = {Toward attojoule switching energy in logic transistors.}, journal = {Science (New York, N.Y.)}, volume = {378}, number = {6621}, pages = {733-740}, doi = {10.1126/science.ade7656}, pmid = {36395210}, issn = {1095-9203}, abstract = {Advances in the theory of semiconductors in the 1930s in addition to the purification of germanium and silicon crystals in the 1940s enabled the point-contact junction transistor in 1947 and initiated the era of semiconductor electronics. Gordon Moore postulated 18 years later that the number of components in an integrated circuit would double every 1 to 2 years with associated reductions in cost per transistor. Transistor density doubling through scaling-the decrease of component sizes-with each new process node continues today, albeit at a slower pace compared with historical rates of scaling. Transistor scaling has resulted in exponential gain in performance and energy efficiency of integrated circuits, which transformed computing from mainframes to personal computers and from mobile computing to cloud computing. Innovations in new materials, transistor structures, and lithographic technologies will enable further scaling. Monolithic 3D integration, design technology co-optimization, alternative switching mechanisms, and cryogenic operation could enable further transistor scaling and improved energy efficiency in the foreseeable future.}, } @article {pmid36388591, year = {2022}, author = {Pei, J and Wang, L and Huang, H and Wang, L and Li, W and Wang, X and Yang, H and Cao, J and Fang, H and Niu, Z}, title = {Characterization and attribution of vegetation dynamics in the ecologically fragile South China Karst: Evidence from three decadal Landsat observations.}, journal = {Frontiers in plant science}, volume = {13}, number = {}, pages = {1043389}, pmid = {36388591}, issn = {1664-462X}, abstract = {Plant growth and its changes over space and time are effective indicators for signifying ecosystem health. However, large uncertainties remain in characterizing and attributing vegetation changes in the ecologically fragile South China Karst region, since most existing studies were conducted at a coarse spatial resolution or covered limited time spans. Considering the highly fragmented landscapes in the region, this hinders their capability in detecting fine information of vegetation dynamics taking place at local scales and comprehending the influence of climate change usually over relatively long temporal ranges. Here, we explored the spatiotemporal variations in vegetation greenness for the entire South China Karst region (1.9 million km[2]) at a resolution of 30m for the notably increased time span (1987-2018) using three decadal Landsat images and the cloud-based Google Earth Engine. Moreover, we spatially attributed the vegetation changes and quantified the relative contribution of driving factors. 
Our results revealed a widespread vegetation recovery in the South China Karst (74.80%) during the past three decades. Notably, the area of vegetation recovery tripled following the implementation of ecological engineering compared with the reference period (1987-1999). Meanwhile, the vegetation restoration trend was strongly sustainable beyond 2018 as demonstrated by the Hurst exponent. Furthermore, climate change contributed only one-fifth to vegetation restoration, whereas major vegetation recovery was highly attributable to afforestation projects, implying that anthropogenic influences accelerated vegetation greenness gains in karst areas since the start of the new millennium during which ecological engineering was continually established. Our study provides additional insights into ecological restoration and conservation in the highly heterogeneous karst landscapes and other similar ecologically fragile areas worldwide.}, } @article {pmid36387768, year = {2022}, author = {Noh, SK}, title = {Deep Learning System for Recycled Clothing Classification Linked to Cloud and Edge Computing.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {6854626}, pmid = {36387768}, issn = {1687-5273}, mesh = {Humans ; *Artificial Intelligence ; *Deep Learning ; Cloud Computing ; Automation ; Clothing ; }, abstract = {Recently, IT technologies related to the Fourth Industrial Revolution (4IR), such as artificial intelligence (AI), Internet of things (IoT), cloud computing, and edge computing, have been studied. Although a large amount of used clothing is generated in Korea, with 61 trillion won of clothing consumption per year, it is not properly collected due to the inefficiency of the used clothing collection system, and the collected used clothing is not properly recycled due to an insufficient recycling system, a lack of skilled labor, and the health problems of workers. To solve this problem, this study proposes a deep learning clothing classification system (DLCCS) using cloud and edge computing. The proposed system classifies clothing image data input from camera terminals installed at clothing classification sites in various regions into two classes, as well as nine classes, by deep learning using a convolutional neural network (CNN). The classification results are stored in the cloud through edge computing. Edge computing enables the analysis of Internet of Things (IoT) device data at the edge of the network before it is transmitted to the cloud. The performance evaluation parameters considered in this study are transmission velocity and latency. The proposed system can efficiently improve the automation of recycled clothing classification and processing in various places. It is also expected that the waste of clothing resources and the health problems of clothing classification workers will be reduced.}, } @article {pmid36374893, year = {2024}, author = {Nguyen, AD and Choi, S and Kim, W and Kim, J and Oh, H and Kang, J and Lee, S}, title = {Single-Image 3-D Reconstruction: Rethinking Point Cloud Deformation.}, journal = {IEEE transactions on neural networks and learning systems}, volume = {35}, number = {5}, pages = {6613-6627}, doi = {10.1109/TNNLS.2022.3211929}, pmid = {36374893}, issn = {2162-2388}, abstract = {Single-image 3-D reconstruction has long been a challenging problem.
Recent deep learning approaches have been introduced to this 3-D area, but the ability to generate point clouds still remains limited due to inefficient and expensive 3-D representations, the dependency between the output and the number of model parameters, or the lack of a suitable computing operation. In this article, we present a novel deep-learning-based method to reconstruct a point cloud of an object from a single still image. The proposed method can be decomposed into two steps: feature fusion and deformation. The first step extracts both global and point-specific shape features from a 2-D object image, and then injects them into a randomly generated point cloud. In the second step, which is deformation, we introduce a new layer termed GraphX that considers the interrelationship between points like common graph convolutions but operates on unordered sets. The framework is applicable to realistic image data with backgrounds, as we optionally learn a mask branch to segment objects from input images. To further improve the quality of the point clouds, we propose an objective function to control the point uniformity. In addition, we introduce different variants of GraphX that range from best performance to best memory budget. Moreover, the proposed model can generate an arbitrary-sized point cloud, making it the first deep method to do so. Extensive experiments demonstrate that we outperform the existing models and set a new benchmark across performance metrics in single-image 3-D reconstruction.}, } @article {pmid36366266, year = {2022}, author = {Kawa, J and Pyciński, B and Smoliński, M and Bożek, P and Kwasecki, M and Pietrzyk, B and Szymański, D}, title = {Design and Implementation of a Cloud PACS Architecture.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {21}, pages = {}, pmid = {36366266}, issn = {1424-8220}, support = {POIR.01.01.01-00-0303/1//European Regional Development Fund/ ; }, mesh = {*Radiology Information Systems ; Cloud Computing ; Computers ; Software ; Tomography, X-Ray Computed ; }, abstract = {The limitations of the classic PACS (picture archiving and communication system), such as the backward-compatible DICOM network architecture and poor security and maintenance, are well-known. They are challenged by various existing solutions employing cloud-related patterns and services. However, a full-scale cloud-native PACS has not yet been demonstrated. The paper introduces a vendor-neutral cloud PACS architecture. It is divided into two main components: a cloud platform and an access device. The cloud platform is responsible for nearline (long-term) image archive, data flow, and backend management. It operates in multi-tenant mode. The access device is responsible for the local DICOM (Digital Imaging and Communications in Medicine) interface and serves as a gateway to cloud services. The cloud PACS was first implemented in an Amazon Web Services environment. It employs a number of general-purpose services designed or adapted for a cloud environment, including Kafka, OpenSearch, and Memcached. Custom services, such as a central PACS node, queue manager, or flow worker, also developed as cloud microservices, bring DICOM support, external integration, and a management layer. The PACS was verified using image traffic from, among others, computed tomography (CT), magnetic resonance (MR), and computed radiography (CR) modalities. During the test, the system reliably stored and accessed image data.
In the following tests, differences in scaling behavior between the monolithic Dcm4chee server and the proposed solution are shown. The growing number of parallel connections did not influence the monolithic server's overall throughput, whereas the performance of the cloud PACS noticeably increased. In the final test, different retrieval patterns were evaluated to assess performance under different scenarios. The current production environment stores over 450 TB of image data and handles over 4000 DICOM nodes.}, } @article {pmid36366264, year = {2022}, author = {Kim, JK and Park, BS and Kim, W and Park, JT and Lee, S and Seo, YH}, title = {Robust Estimation and Optimized Transmission of 3D Feature Points for Computer Vision on Mobile Communication Network.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {21}, pages = {}, pmid = {36366264}, issn = {1424-8220}, mesh = {*Algorithms ; *Vision, Ocular ; Computers ; }, abstract = {Due to the amount of transmitted data and the security of personal or private information in wireless communication, there are cases where the information for a multimedia service should be directly transferred from the user's device to the cloud server without the captured original images. This paper proposes a new method to generate 3D (three-dimensional) keypoints based on a user's mobile device with a commercial RGB camera in a distributed computing environment such as a cloud server. The images are captured with a moving camera and 2D keypoints are extracted from them. After executing feature extraction between continuous frames, disparities are calculated between frames using the relationships between matched keypoints. The physical distance of the baseline is estimated by using the motion information of the camera, and the actual distance is calculated by using the calculated disparity and the estimated baseline. Finally, 3D keypoints are generated by combining the extracted 2D keypoints with the calculated distance. A keypoint-based scene change method is proposed as well. Due to the existing similarity between continuous frames captured from a camera, not all 3D keypoints are transferred and stored, only the new ones. Compared with the ground truth of the TUM dataset, the average error of the estimated 3D keypoints was measured as 5.98 mm, which shows that the proposed method has relatively good performance considering that it uses a commercial RGB camera on a mobile device. Furthermore, the number of transferred 3D keypoints was reduced to about 73.6% of the original.}, } @article {pmid36366095, year = {2022}, author = {Gonzalez-Compean, JL and Sosa-Sosa, VJ and Garcia-Hernandez, JJ and Galeana-Zapien, H and Reyes-Anastacio, HG}, title = {A Blockchain and Fingerprinting Traceability Method for Digital Product Lifecycle Management.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {21}, pages = {}, pmid = {36366095}, issn = {1424-8220}, support = {41756//Consejo Nacional de Ciencia y Tecnología/ ; }, mesh = {*Blockchain ; Computer Security ; *Internet of Things ; Cloud Computing ; Technology ; }, abstract = {The rise of digitalization, sensory devices, cloud computing and internet of things (IoT) technologies enables the design of novel digital product lifecycle management (DPLM) applications for use cases such as manufacturing and delivery of digital products. The verification of the accomplishment/violations of agreements defined in digital contracts is a key task in digital business transactions.
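The depth-recovery step of the 3D-keypoint paper above (pmid36366264) follows the standard stereo relation Z = f * B / d once the baseline has been estimated from camera motion. A small worked sketch with illustrative numbers:

```python
# Depth from disparity via the standard stereo relation Z = f * B / d.
import numpy as np

def keypoint_depth(disparity_px, baseline_m, focal_px):
    """Depth in metres for each matched keypoint; disparities must be > 0."""
    return focal_px * baseline_m / disparity_px

disparities = np.array([42.0, 17.5, 8.3])   # pixels, from matched keypoints
depths = keypoint_depth(disparities, baseline_m=0.06, focal_px=1200.0)
print(depths)                               # ~[1.71, 4.11, 8.67] metres

# Each 3D keypoint is the 2D keypoint (u, v) back-projected with its depth:
# X = (u - cx) * Z / f,  Y = (v - cy) * Z / f,  Z as computed above.
```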
However, this verification represents a challenge when validating both the integrity of digital product content and the transactions performed during multiple stages of the DPLM. This paper presents a traceability method for DPLM based on the integration of online and offline verification mechanisms based on blockchain and fingerprinting, respectively. A blockchain lifecycle registration model is used for organizations to register the exchange of digital products in the cloud with partners and/or consumers throughout the DPLM stages as well as to verify the accomplishment of agreements at each DPLM stage. The fingerprinting scheme is used for offline verification of digital product integrity and to register the DPLM logs within digital products, which is useful in either dispute or agreement-violation scenarios. We built a DPLM service prototype based on this method, which was implemented as a cloud computing service. A case study based on the DPLM of audios was conducted to evaluate this prototype. The experimental evaluation revealed the ability of this method to be applied to DPLM in real scenarios in an efficient manner.}, } @article {pmid36366082, year = {2022}, author = {Hijji, M and Ahmad, B and Alam, G and Alwakeel, A and Alwakeel, M and Abdulaziz Alharbi, L and Aljarf, A and Khan, MU}, title = {Cloud Servers: Resource Optimization Using Different Energy Saving Techniques.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {21}, pages = {}, pmid = {36366082}, issn = {1424-8220}, mesh = {*Cloud Computing ; Physical Phenomena ; *Workload ; }, abstract = {Currently, researchers are working to contribute to the emerging fields of cloud computing, edge computing, and distributed systems. The major area of interest is to examine and understand their performance. Major global companies, such as Google, Amazon, ONLIVE, Giaki, and eBay, are deeply concerned about the impact of energy consumption. These cloud computing companies use huge data centers, consisting of virtual computers that are positioned worldwide and require exceptionally high power costs to maintain. The increased requirement for energy consumption in IT firms has posed many challenges for cloud computing companies pertinent to power expenses. Energy utilization is reliant upon numerous aspects, for example, the service level agreement, techniques for choosing the virtual machine, the applied optimization strategies and policies, and kinds of workload. The present paper addresses energy-saving challenges through dynamic voltage and frequency scaling (DVFS) techniques for gaming data centers, and evaluates the DVFS techniques against non-power-aware and static threshold detection techniques. The findings will help service providers meet quality-of-service and quality-of-experience requirements while fulfilling service level agreements. For this purpose, the CloudSim platform is used to simulate a scenario in which game traces are employed as the workload. The findings show that well-chosen techniques can help gaming servers conserve energy expenditure and sustain the best quality of service for consumers worldwide. The originality of this research lies in examining which procedure performs best (for example, dynamic, static, or non-power-aware).
The findings validate that applying the dynamic voltage and frequency scaling method uses less energy, with fewer service level agreement violations and better quality of service and experience, in contrast with the static threshold consolidation and non-power-aware techniques.}, } @article {pmid36366068, year = {2022}, author = {Baca, A and Dabnichki, P and Hu, CW and Kornfeind, P and Exel, J}, title = {Ubiquitous Computing in Sports and Physical Activity-Recent Trends and Developments.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {21}, pages = {}, pmid = {36366068}, issn = {1424-8220}, mesh = {Humans ; Artificial Intelligence ; *Sports ; *Wearable Electronic Devices ; Exercise ; Athletes ; }, abstract = {The use of small, interconnected and intelligent tools within the broad framework of pervasive computing for analysis and assessments in sport and physical activity is not a trend in itself but defines a way for information to be handled, processed and utilised: everywhere, at any time. The demand for objective data to support decision making prompted the adoption of wearables that evolve to fulfil the aims of assessing athletes and practitioners as closely as possible with their performance environments. In the present paper, we discuss the advancements in ubiquitous computing in sports and physical activity in the past 5 years. Thus, recent developments in wearable sensors, cloud computing and artificial intelligence tools have been the pillars for a major change in the ways sport-related analyses are performed. The focus of our analysis is wearable technology, computer vision solutions for markerless tracking and their major contribution to the process of acquiring more representative data from uninhibited actions in realistic ecological conditions. We selected relevant literature on the applications of such approaches in various areas of sports and physical activity while outlining some limitations of the present-day data acquisition and data processing practices and the resulting sensors' functionalities, as well as the limitations to the data-driven informed decision making in the current technological and scientific framework. Finally, we hypothesise that a continuous merger of measurement, processing and analysis will lead to the development of more reliable models utilising the advantages of open computing and unrestricted data access and allow for the development of personalised-medicine-type approaches to sport training and performance.}, } @article {pmid36366060, year = {2022}, author = {Niebla-Montero, Á and Froiz-Míguez, I and Fraga-Lamas, P and Fernández-Caramés, TM}, title = {Practical Latency Analysis of a Bluetooth 5 Decentralized IoT Opportunistic Edge Computing System for Low-Cost SBCs.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {21}, pages = {}, pmid = {36366060}, issn = {1424-8220}, support = {PID2020-118857RA (ORBALLO)//MCIN/AEI/10.13039/50110001103/ ; ED431C 2020/15 and ED431G 2019/01//Xunta de Galicia and ERDF/ ; }, abstract = {IoT devices can be deployed almost anywhere, but they usually need to be connected to other IoT devices, either through the Internet or local area networks. For such communications, many IoT devices make use of wireless communications, whose coverage is key: if no coverage is available, an IoT device becomes isolated. This can happen both indoors (e.g., large buildings, industrial warehouses) and outdoors (e.g., rural areas, cities).
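A back-of-the-envelope view of why the DVFS approach evaluated in the cloud-server study above saves energy: dynamic CMOS power scales as P = C * V^2 * f, so lowering voltage and frequency together cuts power superlinearly. The numbers below are illustrative, not measurements from the paper.

```python
# Dynamic CMOS power model underlying DVFS: P = C_eff * V^2 * f.
def dynamic_power(c_eff, voltage, freq_hz):
    """Dynamic power (watts) of a core at a given voltage and frequency."""
    return c_eff * voltage ** 2 * freq_hz

C_EFF = 1.0e-9                              # effective capacitance, farads
full   = dynamic_power(C_EFF, 1.2, 3.0e9)   # nominal operating point
scaled = dynamic_power(C_EFF, 0.9, 2.0e9)   # DVFS-reduced operating point

print(f"full: {full:.2f} W, scaled: {scaled:.2f} W, "
      f"saving: {100 * (1 - scaled / full):.0f}%")  # ~62% in this toy case
```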
To tackle such an issue, opportunistic networks can be useful, since they use gateways to provide services to IoT devices when they are in range (i.e., IoT devices take the opportunity of having a nearby gateway to exchange data or to use a computing service). Moreover, opportunistic networks can provide Edge Computing capabilities, thus creating Opportunistic Edge Computing (OEC) systems, which deploy smart gateways able to perform certain tasks faster than a remote Cloud. This article presents a novel decentralized OEC system based on Bluetooth 5 IoT nodes whose latency is evaluated to determine the feasibility of using it in practical applications. The obtained results indicate that, for the selected scenario, the average end-to-end latency is relatively low (736 ms), but it is impacted by factors such as the location of the bootstrap node, the smart gateway hardware or the use of high-security mechanisms.}, } @article {pmid36366028, year = {2022}, author = {Lo, SC and Tsai, HH}, title = {Design of 3D Virtual Reality in the Metaverse for Environmental Conservation Education Based on Cognitive Theory.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {21}, pages = {}, pmid = {36366028}, issn = {1424-8220}, support = {MOST-109-2511-H-150-004//National Science and Technology Council, Taiwan/ ; SWCB-110-049//Soil and Water Conservation Bureau, Taiwan/ ; SWCB-111-052//Soil and Water Conservation Bureau, Taiwan/ ; }, mesh = {Humans ; *Virtual Reality ; *Computer-Assisted Instruction/methods ; Learning ; Cognition ; }, abstract = {BACKGROUND: Climate change causes devastating impacts through extreme weather conditions, such as flooding, melting polar ice caps, sea level rise, and droughts. Environmental conservation education is an important and ongoing project for governments worldwide. In this paper, a novel 3D virtual reality architecture in the metaverse (VRAM) is proposed to foster water resources education using modern information technology.

METHODS: A quasi-experimental study was performed to compare learning with VRAM and learning without VRAM. The 3D VRAM multimedia content comes from a picture book for learning environmental conservation concepts, based on the cognitive theory of multimedia learning to enhance human cognition. Learners wear VRAM helmets running VRAM Android apps and enter the immersive environment to play with and/or interact with 3D VRAM multimedia content in the metaverse. They shake their heads to move the interaction sign and initiate interactive actions, such as replaying, going to consecutive video clips, displaying text annotations, and replying to questions when learning soil-and-water conservation course materials. Interactive portfolios of the triggered actions are immediately transferred to the cloud computing database by the app.

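As an illustration of the logging step just described, the following is a minimal sketch of how an app might transfer one interaction-portfolio record to a cloud database; the endpoint URL and field names are hypothetical assumptions, since the entry does not document the actual API.

```python
# Illustrative only: posts a VRAM-style interaction event to a cloud
# database endpoint. The URL and field names are hypothetical; the
# paper does not document its actual API.
import json
import time
import urllib.request

def log_interaction(learner_id: str, action: str, clip_id: int) -> int:
    """Send one interaction-portfolio record and return the HTTP status."""
    event = {
        "learner_id": learner_id,
        "action": action,          # e.g. "replay", "next_clip", "show_text"
        "clip_id": clip_id,
        "timestamp": time.time(),
    }
    req = urllib.request.Request(
        "https://example.org/vram/portfolio",   # hypothetical endpoint
        data=json.dumps(event).encode("utf-8"),
        headers={"Content-Type": "application/json"},
        method="POST",
    )
    with urllib.request.urlopen(req) as resp:
        return resp.status
```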
RESULTS: Experimental results showed that participants who received instruction involving VRAM had significant improvement in their flow experience, learning motivation, learning interaction, self-efficacy, and presence in learning environmental conservation concepts.

CONCLUSIONS: The novel VRAM is highly suitable for multimedia educational systems. Moreover, learners' interactive VRAM portfolios can be analyzed by big-data analytics to understand usage behaviors and, in the future, improve the quality of environmental conservation education.}, } @article {pmid36365971, year = {2022}, author = {Na, D and Park, S}, title = {IoT-Chain and Monitoring-Chain Using Multilevel Blockchain for IoT Security.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {21}, pages = {}, pmid = {36365971}, issn = {1424-8220}, support = {Keimyung University 2021//Keimyung University/ ; }, abstract = {In general, the Internet of Things (IoT) relies on centralized servers due to limited computing power and storage capacity. These server-based architectures have vulnerabilities such as DDoS attacks, single-point errors, and data forgery, and cannot guarantee stability and reliability. Blockchain technology can guarantee reliability and stability with a P2P network-based consensus algorithm and distributed ledger technology. However, it requires the high storage capacity of the existing blockchain and the computational power of the consensus algorithm. Therefore, blockchain nodes for IoT data management are maintained through an external cloud, an edge node. As a result, the vulnerabilities of the existing centralized structure remain, and reliability cannot be guaranteed in the process of storing IoT data on the blockchain. In this paper, we propose a multi-level blockchain structure and consensus algorithm to address these vulnerabilities. The multi-level blockchain operates on IoT devices and includes an IoT-chain layer that stores sensor data to ensure reliability. In addition, a Hyperledger Fabric-based monitoring-chain layer manages access control for the metadata and data of the IoT chain to keep it lightweight. We propose an export consensus method between the two blockchains, the Schnorr signature method, and a random-based lightweight consensus algorithm within the IoT-Chain. Experiments to measure the blockchain size, propagation time, consensus delay time, and transactions per second (TPS) were conducted using IoT devices. The blockchain did not exceed a certain size, and the delay time was reduced by 96% to 99% on average compared to the existing consensus algorithm. In the throughput tests, the maximum was 1701 TPS and the minimum was 1024 TPS.}, } @article {pmid36365871, year = {2022}, author = {Kaur, A and Singh, G and Kukreja, V and Sharma, S and Singh, S and Yoon, B}, title = {Adaptation of IoT with Blockchain in Food Supply Chain Management: An Analysis-Based Review in Development, Benefits and Potential Applications.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {21}, pages = {}, pmid = {36365871}, issn = {1424-8220}, support = {21163MFDS502//Ministry of Food and Drug Safety/ ; }, mesh = {*Blockchain ; *Internet of Things ; Food Supply ; Monitoring, Physiologic ; Technology ; }, abstract = {In today's scenario, blockchain technology is an emerging and promising technology in the field of the food supply chain industry (FSCI). A literature survey comprising an analytical review of blockchain technology with the Internet of things (IoT) for food supply chain management (FSCM) is presented to better understand the associated research benefits, issues, and challenges.
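An aside on the IoT-Chain entry above (pmid 36365971): its export consensus relies on Schnorr signatures. Below is a minimal, demo-sized sketch of Schnorr signing and verification; the toy group parameters and hash construction are illustrative assumptions, not the cryptographic parameters used in that paper.

```python
# Toy Schnorr signature over a small Schnorr group (p = 23, q = 11,
# g of order q). Demo-sized parameters for illustration only; a real
# deployment such as IoT-Chain would use cryptographically large groups.
# Requires Python 3.8+ for pow(base, -exp, mod).
import hashlib
import secrets

p, q, g = 23, 11, 4  # q divides p - 1; g generates the order-q subgroup

def H(*parts: int) -> int:
    data = b"|".join(str(x).encode() for x in parts)
    return int.from_bytes(hashlib.sha256(data).digest(), "big") % q

def keygen():
    x = secrets.randbelow(q - 1) + 1   # private key in [1, q-1]
    return x, pow(g, x, p)             # (private, public)

def sign(x: int, msg: int):
    k = secrets.randbelow(q - 1) + 1   # fresh per-signature nonce
    r = pow(g, k, p)
    e = H(r, msg)
    s = (k + x * e) % q
    return e, s

def verify(y: int, msg: int, sig) -> bool:
    e, s = sig
    r = (pow(g, s, p) * pow(y, -e, p)) % p   # g^s * y^-e == g^k
    return H(r, msg) == e

x, y = keygen()
assert verify(y, 42, sign(x, 42))
```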
At present, with the concept of farm-to-fork gaining increasing popularity, food safety and quality certification are of critical concern. Blockchain technology provides the traceability of food supply from the source, i.e., the seeding factories, to the customer's table. The main idea of this paper is to integrate blockchain technology with Internet of things (IoT) devices to investigate food conditions and the various issues faced by transporters while supplying fresh food. Blockchain provides applications such as smart contracts to monitor, observe, and manage all transactions and communications among stakeholders. IoT technology provides approaches for verifying all transactions; these transactions are recorded and then stored in a centralized database system. Thus, IoT enables a safe and cost-effective FSCM system for stakeholders. In this paper, we contribute to the awareness of blockchain applications that are relevant to the food supply chain (FSC), and we present an analysis of the literature on relevant blockchain applications conducted with respect to various parameters. The observations in the present survey are also relevant to the application of blockchain technology with IoT in other areas.}, } @article {pmid36365848, year = {2022}, author = {Shamshad, S and Riaz, F and Riaz, R and Rizvi, SS and Abdulla, S}, title = {An Enhanced Architecture to Resolve Public-Key Cryptographic Issues in the Internet of Things (IoT), Employing Quantum Computing Supremacy.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {21}, pages = {}, pmid = {36365848}, issn = {1424-8220}, abstract = {The Internet of Things (IoT) strongly influences the world economy; this emphasizes the importance of securing all four aspects of the IoT model: sensors, networks, cloud, and applications. Considering the significant impact of public-key cryptography threats on IoT system confidentiality, securing it is vital. One of the potential candidates to assist in securing public key cryptography in IoT is quantum computing. Although the notion of IoT and quantum computing convergence is not new, it has been referenced in various works of literature and covered by many scholars. Quantum computing eliminates most of the challenges in IoT. This research provides a comprehensive introduction to the Internet of Things and quantum computing before moving on to public-key cryptography difficulties that may be encountered at the convergence of quantum computing and IoT. An enhanced architecture is then proposed for resolving these public-key cryptography challenges using SimuloQron to implement the BB84 protocol for quantum key distribution (QKD) and one-time pad (OTP). The proposed model prevents eavesdroppers from performing destructive operations in the communication channel and cyber side by preserving its state and protecting the public key using quantum cryptography and the BB84 protocol. A modified version is introduced for this IoT situation.
A traditional cryptographic mechanism called "one-time pad" (OTP) is employed in hybrid management.}, } @article {pmid36357557, year = {2022}, author = {Tuli, S and Casale, G and Jennings, NR}, title = {SimTune: bridging the simulator reality gap for resource management in edge-cloud computing.}, journal = {Scientific reports}, volume = {12}, number = {1}, pages = {19158}, pmid = {36357557}, issn = {2045-2322}, mesh = {Humans ; *Cloud Computing ; Computer Simulation ; }, abstract = {Industries and services are undergoing an Internet of Things centric transformation globally, giving rise to an explosion of multi-modal data generated each second. This, with the requirement of low-latency result delivery, has led to the ubiquitous adoption of edge and cloud computing paradigms. Edge computing follows the data gravity principle, wherein the computational devices move closer to the end-users to minimize data transfer and communication times. However, large-scale computation has exacerbated the problem of efficient resource management in hybrid edge-cloud platforms. In this regard, data-driven models such as deep neural networks (DNNs) have gained popularity to give rise to the notion of edge intelligence. However, DNNs face significant problems of data saturation when fed volatile data. Data saturation occurs when providing more data does not translate into improvements in performance. To address this issue, prior work has leveraged coupled simulators that, akin to digital twins, generate out-of-distribution training data, alleviating the data-saturation problem. However, simulators face the reality-gap problem, which is the inaccuracy in the emulation of real computational infrastructure due to the abstractions in such simulators. To combat this, we develop a framework, SimTune, that tackles this challenge by leveraging a low-fidelity surrogate model of the high-fidelity simulator to update the parameters of the latter, so as to increase the simulation accuracy. This further helps co-simulated methods to generalize to edge-cloud configurations for which human-encoded parameters are not known a priori. Experiments comparing SimTune against state-of-the-art data-driven resource management solutions on a real edge-cloud platform demonstrate that simulator tuning can improve quality of service metrics such as energy consumption and response time by up to 14.7% and 7.6%, respectively.}, } @article {pmid36351936, year = {2022}, author = {Benhammou, Y and Alcaraz-Segura, D and Guirado, E and Khaldi, R and Achchab, B and Herrera, F and Tabik, S}, title = {Sentinel2GlobalLULC: A Sentinel-2 RGB image tile dataset for global land use/cover mapping with deep learning.}, journal = {Scientific data}, volume = {9}, number = {1}, pages = {681}, pmid = {36351936}, issn = {2052-4463}, abstract = {Land-Use and Land-Cover (LULC) mapping is relevant for many applications, from Earth system and climate modelling to territorial and urban planning. Global LULC products are continuously developing as remote sensing data and methods grow. However, there still exists low consistency among LULC products due to low accuracy in some regions and LULC types. Here, we introduce Sentinel2GlobalLULC, a Sentinel-2 RGB image dataset, built from the spatial-temporal consensus of up to 15 global LULC maps available in Google Earth Engine. Sentinel2GlobalLULC v2.1 contains 194877 single-class RGB image tiles organized into 29 LULC classes.
Each image is a 224 × 224-pixel tile at 10 × 10 m resolution built as a cloud-free composite from Sentinel-2 images acquired between June 2015 and October 2020. Metadata includes a unique LULC annotation per image, together with level of consensus, reverse geo-referencing, global human modification index, and number of dates used in the composite. Sentinel2GlobalLULC is designed for training deep learning models aiming to build precise and robust global or regional LULC maps.}, } @article {pmid36350854, year = {2023}, author = {Zhang, X and Han, L and Sobeih, T and Han, L and Dempsey, N and Lechareas, S and Tridente, A and Chen, H and White, S and Zhang, D}, title = {CXR-Net: A Multitask Deep Learning Network for Explainable and Accurate Diagnosis of COVID-19 Pneumonia From Chest X-Ray Images.}, journal = {IEEE journal of biomedical and health informatics}, volume = {27}, number = {2}, pages = {980-991}, doi = {10.1109/JBHI.2022.3220813}, pmid = {36350854}, issn = {2168-2208}, mesh = {Humans ; *COVID-19/diagnostic imaging ; *Deep Learning ; X-Rays ; Thorax/diagnostic imaging ; *Pneumonia, Viral/diagnostic imaging ; COVID-19 Testing ; }, abstract = {Accurate and rapid detection of COVID-19 pneumonia is crucial for optimal patient treatment. Chest X-Ray (CXR) is the first-line imaging technique for COVID-19 pneumonia diagnosis as it is fast, cheap and easily accessible. Currently, many deep learning (DL) models have been proposed to detect COVID-19 pneumonia from CXR images. Unfortunately, these deep classifiers lack transparency in interpreting findings, which may limit their applications in clinical practice. The existing explanation methods produce results that are either too noisy or too imprecise, and hence are unsuitable for diagnostic purposes. In this work, we propose a novel explainable CXR deep neural Network (CXR-Net) for accurate COVID-19 pneumonia detection with an enhanced pixel-level visual explanation using CXR images. An Encoder-Decoder-Encoder architecture is proposed, in which an extra encoder is added after the encoder-decoder structure to ensure the model can be trained on category samples. The method has been evaluated on real world CXR datasets from both public and private sources, including healthy, bacterial pneumonia, viral pneumonia and COVID-19 pneumonia cases. The results demonstrate that the proposed method can achieve satisfactory accuracy and provide fine-resolution activation maps for visual explanation in lung disease detection. Compared to current state-of-the-art visual explanation methods, the proposed method can provide a more detailed, high-resolution visual explanation for the classification results. It can be deployed in various computing environments, including cloud, CPU and GPU environments.
It has great potential to be used in clinical practice for COVID-19 pneumonia diagnosis.}, } @article {pmid36335750, year = {2022}, author = {Tomassini, S and Sbrollini, A and Covella, G and Sernani, P and Falcionelli, N and Müller, H and Morettini, M and Burattini, L and Dragoni, AF}, title = {Brain-on-Cloud for automatic diagnosis of Alzheimer's disease from 3D structural magnetic resonance whole-brain scans.}, journal = {Computer methods and programs in biomedicine}, volume = {227}, number = {}, pages = {107191}, doi = {10.1016/j.cmpb.2022.107191}, pmid = {36335750}, issn = {1872-7565}, mesh = {Humans ; *Alzheimer Disease/diagnostic imaging/pathology ; Quality of Life ; Neuroimaging/methods ; Magnetic Resonance Imaging/methods ; Brain/diagnostic imaging/pathology ; Magnetic Resonance Spectroscopy ; *Cognitive Dysfunction ; }, abstract = {BACKGROUND AND OBJECTIVE: Alzheimer's disease accounts for approximately 70% of all dementia cases. Cortical and hippocampal atrophy caused by Alzheimer's disease can be appreciated easily from a T1-weighted structural magnetic resonance scan. Since a timely therapeutic intervention during the initial stages of the syndrome has a positive impact on both disease progression and quality of life of affected subjects, Alzheimer's disease diagnosis is crucial. Thus, this study develops a robust yet lightweight 3D framework, Brain-on-Cloud, dedicated to efficient learning of Alzheimer's disease-related features from 3D structural magnetic resonance whole-brain scans. It improves our recent convolutional long short-term memory-based framework by integrating a set of data handling techniques, tuning the model hyper-parameters, and evaluating its diagnostic performance on independent test data.

METHODS: For this objective, four serial experiments were conducted on a scalable GPU cloud service. They were compared and the hyper-parameters of the best experiment were tuned until reaching the best-performing configuration. In parallel, two branches were designed. In the first branch of Brain-on-Cloud, training, validation and testing were performed on OASIS-3. In the second branch, unenhanced data from ADNI-2 were employed as independent test set, and the diagnostic performance of Brain-on-Cloud was evaluated to prove its robustness and generalization capability. The prediction scores were computed for each subject and stratified according to age, sex and mini mental state examination.

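As a concrete illustration of the convolutional long short-term memory basis of this framework, here is a minimal Keras sketch of a ConvLSTM classifier over a sequence of 2D slices; the input shape, layer sizes, and class count are illustrative assumptions, not the published Brain-on-Cloud configuration.

```python
# Minimal ConvLSTM-style classifier sketch (Keras). Shapes and sizes are
# illustrative; the published Brain-on-Cloud hyper-parameters differ.
import tensorflow as tf
from tensorflow.keras import layers, models

def build_model(n_slices=64, height=96, width=96, n_classes=2):
    model = models.Sequential([
        layers.Input(shape=(n_slices, height, width, 1)),  # slice sequence
        layers.ConvLSTM2D(16, kernel_size=3, padding="same",
                          return_sequences=False),  # joint spatial-temporal
        layers.BatchNormalization(),
        layers.GlobalAveragePooling2D(),
        layers.Dense(n_classes, activation="softmax"),
    ])
    model.compile(optimizer="adam",
                  loss="sparse_categorical_crossentropy",
                  metrics=["accuracy"])
    return model
```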
RESULTS: In its best guise, Brain-on-Cloud is able to discriminate Alzheimer's disease with an accuracy of 92% and 76%, sensitivity of 94% and 82%, and area under the curve of 96% and 92% on OASIS-3 and independent ADNI-2 test data, respectively.

CONCLUSIONS: Brain-on-Cloud proves to be a reliable, lightweight and easily reproducible framework for automatic diagnosis of Alzheimer's disease from 3D structural magnetic resonance whole-brain scans, performing well without segmenting the brain into its portions. Because it preserves the brain anatomy, its application and diagnostic ability can be extended to other cognitive disorders. Due to its cloud nature, computational lightness and fast execution, it can also be applied in real-time diagnostic scenarios providing prompt clinical decision support.}, } @article {pmid36321417, year = {2022}, author = {Golkar, A and Malekhosseini, R and RahimiZadeh, K and Yazdani, A and Beheshti, A}, title = {A priority queue-based telemonitoring system for automatic diagnosis of heart diseases in integrated fog computing environments.}, journal = {Health informatics journal}, volume = {28}, number = {4}, pages = {14604582221137453}, doi = {10.1177/14604582221137453}, pmid = {36321417}, issn = {1741-2811}, mesh = {Humans ; *Cloud Computing ; Delivery of Health Care ; *Heart Diseases ; }, abstract = {Various studies have shown the benefits of using distributed fog computing for healthcare systems. The new pattern of fog and edge computing reduces latency for data processing compared to cloud computing. Nevertheless, the proposed fog models still have many limitations in improving system performance and patients' response time. This paper proposes a new performance model that integrates fog computing, priority queues and certainty theory into edge computing devices, validating it by analyzing heart disease patients' conditions in a clinical decision support system (CDSS). In this model, a Certainty Factor (CF) value is assigned to each symptom of heart disease. When one or more symptoms show an abnormal value, the patient's condition will be evaluated using CF values in the fog layer. In the fog layer, requests are categorized into different priority queues before entering the system. The results demonstrate that network usage, latency, and response time of patients' requests are respectively improved by 25.55%, 42.92%, and 34.28% compared to the cloud model.
Prioritizing patient requests by CF value in the CDSS improves system Quality of Service (QoS) and shortens patients' response time.}, } @article {pmid36318260, year = {2023}, author = {Ament, SA and Adkins, RS and Carter, R and Chrysostomou, E and Colantuoni, C and Crabtree, J and Creasy, HH and Degatano, K and Felix, V and Gandt, P and Garden, GA and Giglio, M and Herb, BR and Khajouei, F and Kiernan, E and McCracken, C and McDaniel, K and Nadendla, S and Nickel, L and Olley, D and Orvis, J and Receveur, JP and Schor, M and Sonthalia, S and Tickle, TL and Way, J and Hertzano, R and Mahurkar, AA and White, OR}, title = {The Neuroscience Multi-Omic Archive: a BRAIN Initiative resource for single-cell transcriptomic and epigenomic data from the mammalian brain.}, journal = {Nucleic acids research}, volume = {51}, number = {D1}, pages = {D1075-D1085}, pmid = {36318260}, issn = {1362-4962}, support = {R01 DC019370/DC/NIDCD NIH HHS/United States ; R24 MH114788/MH/NIMH NIH HHS/United States ; R24 MH114815/MH/NIMH NIH HHS/United States ; UM1 DA052244/DA/NIDA NIH HHS/United States ; }, mesh = {Animals ; Mice ; *Epigenomics ; Genomics ; Mammals ; *Multiomics ; Primates ; *Transcriptome ; *Brain/cytology/metabolism ; *Databases, Genetic ; }, abstract = {Scalable technologies to sequence the transcriptomes and epigenomes of single cells are transforming our understanding of cell types and cell states. The Brain Research through Advancing Innovative Neurotechnologies (BRAIN) Initiative Cell Census Network (BICCN) is applying these technologies at unprecedented scale to map the cell types in the mammalian brain. In an effort to increase data FAIRness (Findable, Accessible, Interoperable, Reusable), the NIH has established repositories to make data generated by the BICCN and related BRAIN Initiative projects accessible to the broader research community. Here, we describe the Neuroscience Multi-Omic Archive (NeMO Archive; nemoarchive.org), which serves as the primary repository for genomics data from the BRAIN Initiative. Working closely with other BRAIN Initiative researchers, we have organized these data into a continually expanding, curated repository, which contains transcriptomic and epigenomic data from over 50 million brain cells, including single-cell genomic data from all of the major regions of the adult and prenatal human and mouse brains, as well as substantial single-cell genomic data from non-human primates. We make available several tools for accessing these data, including a searchable web portal, a cloud-computing interface for large-scale data processing (implemented on Terra, terra.bio), and a visualization and analysis platform, NeMO Analytics (nemoanalytics.org).}, } @article {pmid36316488, year = {2022}, author = {Prakash, AJ and Kumar, S and Behera, MD and Das, P and Kumar, A and Srivastava, PK}, title = {Impact of extreme weather events on cropland inundation over Indian subcontinent.}, journal = {Environmental monitoring and assessment}, volume = {195}, number = {1}, pages = {50}, pmid = {36316488}, issn = {1573-2959}, mesh = {*Extreme Weather ; Environmental Monitoring/methods ; Floods ; Crops, Agricultural ; Water ; Weather ; }, abstract = {Cyclonic storms and extreme precipitation lead to loss of lives and significant damage to land and property, crop productivity, etc.
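An aside on the fog-based CDSS entry above (pmid 36321417): a minimal sketch of certainty-factor triage with a priority queue follows. The CF values are invented for the example, and the MYCIN-style combination rule is a standard textbook choice assumed here, not necessarily that paper's exact formulation.

```python
# Illustrative CF-based triage: symptom certainty factors are combined
# with the classic MYCIN rule and used to order a priority queue.
# CF values here are made up for the example.
import heapq

SYMPTOM_CF = {"chest_pain": 0.8, "dyspnea": 0.6, "palpitations": 0.4}

def combine_cf(cfs):
    """MYCIN combination for positive CFs: cf = a + b * (1 - a)."""
    total = 0.0
    for cf in cfs:
        total = total + cf * (1.0 - total)
    return total

queue = []  # min-heap; negate CF so the most certain case pops first
for patient, symptoms in [("p1", ["dyspnea"]),
                          ("p2", ["chest_pain", "palpitations"])]:
    cf = combine_cf(SYMPTOM_CF[s] for s in symptoms)
    heapq.heappush(queue, (-cf, patient))

while queue:
    neg_cf, patient = heapq.heappop(queue)
    print(f"{patient}: CF={-neg_cf:.2f}")  # p2 (0.88) before p1 (0.60)
```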
The "Gulab" cyclonic storm formed on the 24th of September 2021 in the Bay of Bengal (BoB), hit the eastern Indian coasts on the 26th of September and caused massive damage and water inundation. This study used Integrated Multi-satellite Retrievals for GPM (IMERG) satellite precipitation data for daily to monthly scale assessments focusing on the "Gulab" cyclonic event. Otsu's thresholding approach was applied to Sentinel-1 data to map water inundation. The Standardized Precipitation Index (SPI) was employed to analyze the precipitation deviation compared to the 20-year mean climatology across India from June to November 2021 on a monthly scale. The water-inundated areas were overlaid on a recent publicly available high-resolution land use land cover (LULC) map to demarcate crop area damage in four eastern Indian states: Andhra Pradesh, Chhattisgarh, Odisha, and Telangana. The maximum water inundation and crop area damages were observed in Andhra Pradesh (~2700 km²), followed by Telangana (~2040 km²) and Odisha (~1132 km²), and the least in Chhattisgarh (~93.75 km²). This study has potential implications for an emergency response to extreme weather events, such as cyclones, extreme precipitation, and flood. The spatio-temporal data layers and rapid assessment methodology can be helpful to various users such as disaster management authorities, mitigation and response teams, and developers of crop insurance schemes. The relevant satellite data, products, and cloud-computing facility could operationalize systematic disaster monitoring under the rising threats of extreme weather events in the coming years.}, } @article {pmid36316226, year = {2022}, author = {Khosla, A and Sonu, and Awan, HTA and Singh, K and Gaurav, and Walvekar, R and Zhao, Z and Kaushik, A and Khalid, M and Chaudhary, V}, title = {Emergence of MXene and MXene-Polymer Hybrid Membranes as Future- Environmental Remediation Strategies.}, journal = {Advanced science (Weinheim, Baden-Wurttemberg, Germany)}, volume = {9}, number = {36}, pages = {e2203527}, pmid = {36316226}, issn = {2198-3844}, support = {STR-IRNGS-SET-GAMRG-01-2022//Sunway University/ ; }, mesh = {*Artificial Intelligence ; *Environmental Restoration and Remediation ; Machine Learning ; Polymers ; }, abstract = {The continuous deterioration of the environment due to extensive industrialization and urbanization has raised the requirement to devise high-performance environmental remediation technologies. Membrane technologies, primarily based on conventional polymers, are the most commercialized air, water, solid, and radiation-based environmental remediation strategies. Low stability at high temperatures, swelling in organic contaminants, and poor selectivity are the fundamental issues associated with polymeric membranes restricting their scalable viability. Polymer-metal-carbides and nitrides (MXenes) hybrid membranes possess remarkable physicochemical attributes, including strong mechanical endurance, high mechanical flexibility, superior adsorptive behavior, and selective permeability, due to multi-interactions between polymers and MXene's surface functionalities. This review articulates the state-of-the-art MXene-polymer hybrid membranes, emphasizing their fabrication routes, enhanced physicochemical properties, and improved adsorptive behavior.
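An aside on the cropland-inundation entry above (pmid 36316488): a minimal sketch of its Otsu water-masking step, assuming the Sentinel-1 backscatter has already been loaded as a NumPy array (the study itself runs this at scale on cloud platforms, which is not reproduced here).

```python
# Illustrative Otsu water mask: low SAR backscatter pixels are flagged
# as inundated. Assumes a pre-loaded 2D backscatter array in dB.
import numpy as np
from skimage.filters import threshold_otsu

def water_mask(backscatter_db: np.ndarray) -> np.ndarray:
    """Return a boolean mask where True marks likely open water."""
    t = threshold_otsu(backscatter_db)
    return backscatter_db < t   # water appears dark (low backscatter)

# Example with synthetic data: dark 'water' patch inside brighter 'land'.
img = np.full((100, 100), -8.0) + np.random.randn(100, 100)
img[40:60, 40:60] = -18.0
mask = water_mask(img)
print(f"inundated fraction: {mask.mean():.2%}")   # roughly 4%
```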
The review comprehensively summarizes the utilization of MXene-polymer hybrid membranes for environmental remediation applications, including water purification, desalination, ion-separation, gas separation and detection, contaminant adsorption, and electromagnetic and nuclear radiation shielding. Furthermore, the review highlights the associated bottlenecks of MXene-polymer hybrid membranes and possible alternative solutions to meet industrial requirements. Finally, opportunities and prospects for MXene-polymer membranes to enable intelligent, next-generation environmental remediation strategies, integrating modern technologies such as the internet-of-things, artificial intelligence, machine-learning, 5G-communication and cloud-computing, are elucidated.}, } @article {pmid36304269, year = {2022}, author = {Raveendran, K and Freese, NH and Kintali, C and Tiwari, S and Bole, P and Dias, C and Loraine, AE}, title = {BioViz Connect: Web Application Linking CyVerse Cloud Resources to Genomic Visualization in the Integrated Genome Browser.}, journal = {Frontiers in bioinformatics}, volume = {2}, number = {}, pages = {764619}, pmid = {36304269}, issn = {2673-7647}, support = {R01 GM121927/GM/NIGMS NIH HHS/United States ; R35 GM139609/GM/NIGMS NIH HHS/United States ; }, abstract = {Genomics researchers do better work when they can interactively explore and visualize data. Due to the vast size of experimental datasets, researchers are increasingly using powerful, cloud-based systems to process and analyze data. These remote systems, called science gateways, offer user-friendly, Web-based access to high performance computing and storage resources, but typically lack interactive visualization capability. In this paper, we present BioViz Connect, a middleware Web application that links CyVerse science gateway resources to the Integrated Genome Browser (IGB), a highly interactive native application implemented in Java that runs on the user's personal computer. Using BioViz Connect, users can 1) stream data from the CyVerse data store into IGB for visualization, 2) improve the IGB user experience for themselves and others by adding IGB specific metadata to CyVerse data files, including genome version and track appearance, and 3) run compute-intensive visual analytics functions on CyVerse infrastructure to create new datasets for visualization in IGB or other applications. To demonstrate how BioViz Connect facilitates interactive data visualization, we describe an example RNA-Seq data analysis investigating how heat and desiccation stresses affect gene expression in the model plant Arabidopsis thaliana. The RNA-Seq use case illustrates how interactive visualization with IGB can help a user identify problematic experimental samples, sanity-check results using a positive control, and create new data files for interactive visualization in IGB (or other tools) using a Docker image deployed to CyVerse via the Terrain API. Lastly, we discuss limitations of the technologies used and suggest opportunities for future work.
BioViz Connect is available from https://bioviz.org.}, } @article {pmid36303792, year = {2021}, author = {Guérinot, C and Marcon, V and Godard, C and Blanc, T and Verdier, H and Planchon, G and Raimondi, F and Boddaert, N and Alonso, M and Sailor, K and Lledo, PM and Hajj, B and El Beheiry, M and Masson, JB}, title = {New Approach to Accelerated Image Annotation by Leveraging Virtual Reality and Cloud Computing.}, journal = {Frontiers in bioinformatics}, volume = {1}, number = {}, pages = {777101}, pmid = {36303792}, issn = {2673-7647}, abstract = {Three-dimensional imaging is at the core of medical imaging and is becoming a standard in biological research. As a result, there is an increasing need to visualize, analyze and interact with data in a natural three-dimensional context. By combining stereoscopy and motion tracking, commercial virtual reality (VR) headsets provide a solution to this critical visualization challenge by allowing users to view volumetric image stacks in a highly intuitive fashion. While optimizing the visualization and interaction process in VR remains an active topic, one of the most pressing issues is how to utilize VR for annotation and analysis of data. Annotating data is often a required step for training machine learning algorithms. For example, in biological research, enhancing the ability to annotate complex three-dimensional data is essential, as newly acquired data may come in limited quantities. Similarly, medical data annotation is often time-consuming and requires expert knowledge to identify structures of interest correctly. Moreover, simultaneous data analysis and visualization in VR is computationally demanding. Here, we introduce a new procedure to visualize, interact, annotate and analyze data by combining VR with cloud computing. VR is leveraged to provide natural interactions with volumetric representations of experimental imaging data. In parallel, cloud computing performs costly computations to accelerate the data annotation with minimal input required from the user. We demonstrate multiple proof-of-concept applications of our approach on volumetric fluorescent microscopy images of mouse neurons and tumor or organ annotations in medical images.}, } @article {pmid36301785, year = {2023}, author = {Reani, Y and Bobrowski, O}, title = {Cycle Registration in Persistent Homology With Applications in Topological Bootstrap.}, journal = {IEEE transactions on pattern analysis and machine intelligence}, volume = {45}, number = {5}, pages = {5579-5593}, doi = {10.1109/TPAMI.2022.3217443}, pmid = {36301785}, issn = {1939-3539}, abstract = {We propose a novel approach for comparing the persistent homology representations of two spaces (or filtrations). Commonly used methods are based on numerical summaries such as persistence diagrams and persistence landscapes, along with suitable metrics (e.g., Wasserstein). These summaries are useful for computational purposes, but they are merely a marginal of the actual topological information that persistent homology can provide. Instead, our approach compares two topological representations directly in the data space. We do so by defining a correspondence relation between individual persistent cycles of two different spaces, and devising a method for computing this correspondence. Our matching of cycles is based on both the persistence intervals and the spatial placement of each feature.
We demonstrate our new framework in the context of topological inference, where we use statistical bootstrap methods in order to differentiate between real features and noise in point cloud data.}, } @article {pmid36299750, year = {2022}, author = {Li, X and You, K}, title = {Real-time tracking and detection of patient conditions in the intelligent m-Health monitoring system.}, journal = {Frontiers in public health}, volume = {10}, number = {}, pages = {922718}, pmid = {36299750}, issn = {2296-2565}, mesh = {Humans ; *Telemedicine ; }, abstract = {In order to help patients monitor their personal health in real time, this paper proposes an intelligent mobile health monitoring system and establishes a corresponding health network to track and process patients' physical activity and other health-related factors in real time. Its performance was then analyzed. The experimental results, comparing the accuracy, delay time, error range, efficiency, and energy utilization of Im-HMS and the existing UCD system, show that the accuracy of Im-HMS is mostly between 98 and 100%, while the accuracy of the UCD system is mostly between 91 and 97%; in terms of delay, the Im-HMS delay is between 18 and 39 ms, far below the UCD system's lowest value of 84 ms, so Im-HMS is significantly better than the existing UCD system; the error range of Im-HMS is mainly between 0.2 and 1.4, while that of the UCD system is mainly between -2 and 14; and in terms of efficiency and energy utilization, Im-HMS values are higher than those of the UCD system. In general, the Im-HMS system proposed in this study is more accurate than the UCD system, with lower delay, smaller error, higher efficiency, and more efficient energy utilization, which is of great significance for mobile health monitoring in practical applications.}, } @article {pmid36299577, year = {2022}, author = {Yu, L and Yu, PS and Duan, Y and Qiao, H}, title = {A resource scheduling method for reliable and trusted distributed composite services in cloud environment based on deep reinforcement learning.}, journal = {Frontiers in genetics}, volume = {13}, number = {}, pages = {964784}, pmid = {36299577}, issn = {1664-8021}, abstract = {With the vigorous development of Internet technology, applications are increasingly migrating to the cloud. Cloud, a distributed network environment, has been widely extended to many fields such as digital finance, supply chain management, and biomedicine. In order to meet the needs of the rapid development of the modern biomedical industry, the biological cloud platform is an inevitable choice for the integration and analysis of medical information. It improves the work efficiency of the biological information system and also realizes reliable and credible intelligent processing of biological resources. Cloud services in bioinformatics are mainly for the processing of biological data, such as the analysis and processing of genes, the testing and detection of human tissues and organs, and the storage and transportation of vaccines. Biomedical companies form a data chain on the cloud, and they provide services and transfer data to each other to create composite services. Therefore, our motivation is to improve process efficiency of biological cloud services.
Users' business requirements have become complicated and diversified, which places higher demands on service scheduling strategies in cloud computing platforms. In addition, deep reinforcement learning shows strong perception and continuous decision-making capabilities in automatic control problems, which provides a new idea and method for solving the service scheduling and resource allocation problems in the cloud computing field. Therefore, this paper designs a composite service scheduling model under a container instance mode that combines reservation and on-demand. The containers in the cluster are divided into two instance modes: reservation and on-demand. A composite service is described as a three-level structure: a composite service consists of multiple services, and a service consists of multiple service instances, where the service instance is the minimum scheduling unit. In addition, an improved Deep Q-Network (DQN) algorithm is proposed and applied to the scheduling algorithm of composite services. The experimental results show that applying our improved DQN algorithm to the composite services scheduling problem in the container cloud environment can effectively reduce the completion time of the composite services. Meanwhile, the method improves Quality of Service (QoS) and resource utilization in the container cloud environment.}, } @article {pmid36298902, year = {2022}, author = {Zhang, Y and Wu, Z and Lin, P and Pan, Y and Wu, Y and Zhang, L and Huangfu, J}, title = {Hand gestures recognition in videos taken with a lensless camera.}, journal = {Optics express}, volume = {30}, number = {22}, pages = {39520-39533}, doi = {10.1364/OE.470324}, pmid = {36298902}, issn = {1094-4087}, mesh = {*Gestures ; *Pattern Recognition, Automated/methods ; Algorithms ; Neural Networks, Computer ; }, abstract = {A lensless camera is an imaging system that uses a mask in place of a lens, making it thinner, lighter, and less expensive than a lensed camera. However, additional complex computation and time are required for image reconstruction. This work proposes a deep learning model named Raw3dNet that recognizes hand gestures directly on raw videos captured by a lensless camera without the need for image restoration. In addition to conserving computational resources, the reconstruction-free method provides privacy protection. Raw3dNet is a novel end-to-end deep neural network model for the recognition of hand gestures in lensless imaging systems. It is created specifically for raw video captured by a lensless camera and has the ability to properly extract and combine temporal and spatial features. The network is composed of two stages: 1. spatial feature extractor (SFE), which enhances the spatial features of each frame prior to temporal convolution; 2. 3D-ResNet, which implements spatial and temporal convolution of video streams. The proposed model achieves 98.59% accuracy on the Cambridge Hand Gesture dataset in the lensless optical experiment, which is comparable to the lensed-camera result. Additionally, the feasibility of physical object recognition is assessed.
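An aside on the lensless-camera entry above (pmid 36298902): a minimal sketch of its two-stage idea, per-frame spatial feature extraction followed by spatio-temporal 3D convolution. Layer sizes and the input shape are illustrative assumptions; the published Raw3dNet uses a 3D-ResNet backbone that is not reproduced here.

```python
# Minimal spatial-then-3D-convolution sketch (Keras) in the spirit of
# Raw3dNet. Layer sizes and input shape are illustrative assumptions.
import tensorflow as tf
from tensorflow.keras import layers, models

def build_gesture_net(frames=16, height=64, width=64, n_classes=9):
    inp = layers.Input(shape=(frames, height, width, 1))
    # Stage 1: per-frame spatial feature extraction (2D conv on each frame)
    x = layers.TimeDistributed(layers.Conv2D(8, 3, padding="same",
                                             activation="relu"))(inp)
    # Stage 2: joint spatio-temporal convolution over the video volume
    x = layers.Conv3D(16, 3, padding="same", activation="relu")(x)
    x = layers.GlobalAveragePooling3D()(x)
    out = layers.Dense(n_classes, activation="softmax")(x)
    model = models.Model(inp, out)
    model.compile(optimizer="adam",
                  loss="sparse_categorical_crossentropy",
                  metrics=["accuracy"])
    return model
```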
Further, we show that the recognition can be achieved with respectable accuracy using only a tiny portion of the original raw data, indicating the potential for reducing data traffic in cloud computing scenarios.}, } @article {pmid36298422, year = {2022}, author = {Amin, F and Abbasi, R and Mateen, A and Ali Abid, M and Khan, S}, title = {A Step toward Next-Generation Advancements in the Internet of Things Technologies.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {20}, pages = {}, pmid = {36298422}, issn = {1424-8220}, abstract = {Internet of Things (IoT) devices generate a large amount of data over networks; therefore, the efficiency, complexity, interfaces, dynamics, robustness, and interaction need to be re-examined on a large scale. This phenomenon will lead to seamless network connectivity and the capability to provide support for the IoT. The traditional IoT is not enough to provide this support. Therefore, we designed this study to provide a systematic analysis of next-generation advancements in the IoT. We propose a systematic catalog that covers the most recent advances in the traditional IoT. An overview of the IoT is given from the perspectives of the big data, data science, and network science disciplines, as well as the connecting technologies. We highlight the conceptual view of the IoT, key concepts, growth, and most recent trends. We discuss and highlight the importance and the integration of big data, data science, and network science along with key applications such as artificial intelligence, machine learning, blockchain, federated learning, etc. Finally, we discuss various challenges and issues of IoT such as architecture, integration, data provenance, and important applications such as cloud and edge computing, etc. This article will help readers and other researchers understand next-generation IoT developments and how they apply to the real world.}, } @article {pmid36298408, year = {2022}, author = {Farag, MM}, title = {Matched Filter Interpretation of CNN Classifiers with Application to HAR.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {20}, pages = {}, pmid = {36298408}, issn = {1424-8220}, support = {GRANT1610//King Faisal University/ ; }, mesh = {Humans ; *Neural Networks, Computer ; *Human Activities ; Machine Learning ; Smartphone ; }, abstract = {Time series classification is an active research topic due to its wide range of applications and the proliferation of sensory data. Convolutional neural networks (CNNs) are ubiquitous in modern machine learning (ML) models. In this work, we present a matched filter (MF) interpretation of CNN classifiers accompanied by an experimental proof of concept using a carefully developed synthetic dataset. We exploit this interpretation to develop an MF CNN model for time series classification comprising a stack of a Conv1D layer followed by a GlobalMaxPooling layer acting as a typical MF for automated feature extraction and a fully connected layer with softmax activation for computing class probabilities. The presented interpretation enables developing superlight, highly accurate classifier models that meet the tight requirements of edge inference. Edge inference is an emerging research area that addresses the latency, availability, privacy, and connectivity concerns of the commonly deployed cloud inference.
The MF-based CNN model has been applied to the sensor-based human activity recognition (HAR) problem due to its significant importance in a broad range of applications. The UCI-HAR, WISDM-AR, and MotionSense datasets are used for model training and testing. The proposed classifier is tested and benchmarked on an Android smartphone with average accuracy and F1 scores of 98% and 97%, respectively, which outperforms state-of-the-art HAR methods in terms of classification accuracy and run-time performance. The proposed model size is less than 150 KB, and the average inference time is less than 1 ms. The presented interpretation helps develop a better understanding of CNN operation and decision mechanisms. The proposed model is distinguished from related work by jointly featuring interpretability, high accuracy, and low computational cost, enabling its ready deployment on a wide set of mobile devices for a broad range of applications.}, } @article {pmid36298402, year = {2022}, author = {Munir, T and Akbar, MS and Ahmed, S and Sarfraz, A and Sarfraz, Z and Sarfraz, M and Felix, M and Cherrez-Ojeda, I}, title = {A Systematic Review of Internet of Things in Clinical Laboratories: Opportunities, Advantages, and Challenges.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {20}, pages = {}, pmid = {36298402}, issn = {1424-8220}, mesh = {*Internet of Things ; Computer Security ; Laboratories, Clinical ; Privacy ; Software ; }, abstract = {The Internet of Things (IoT) is the network of physical objects embedded with sensors, software, electronics, and online connectivity systems. This study explores the role of IoT in clinical laboratory processes; this systematic review was conducted adhering to the PRISMA Statement 2020 guidelines. We included IoT models and applications across preanalytical, analytical, and postanalytical laboratory processes. PubMed, Cochrane Central, CINAHL Plus, Scopus, IEEE, and A.C.M. Digital library were searched between August 2015 to August 2022; the data were tabulated. Cohen's coefficient of agreement was calculated to quantify inter-reviewer agreements; a total of 18 studies were included with Cohen's coefficient computed to be 0.91. The included studies were divided into three classifications based on availability, including preanalytical, analytical, and postanalytical. The majority (77.8%) of the studies were real-tested. Communication-based approaches were the most common (83.3%), followed by application-based approaches (44.4%) and sensor-based approaches (33.3%) among the included studies. Open issues and challenges across the included studies included scalability, costs and energy consumption, interoperability, privacy and security, and performance issues. In this study, we identified, classified, and evaluated IoT applicability in clinical laboratory systems.
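An aside on the matched-filter entry above (pmid 36298408): the model is described as a Conv1D layer, a GlobalMaxPooling layer, and a softmax dense layer, so a minimal Keras sketch of that stack follows; the filter count, kernel size, and input shape are illustrative assumptions, not the paper's tuned values.

```python
# Minimal matched-filter-style CNN for time-series classification
# (Conv1D -> GlobalMaxPooling1D -> Dense softmax), per the description
# above. Filter count, kernel size, and shapes are illustrative.
import tensorflow as tf
from tensorflow.keras import layers, models

def build_mf_cnn(timesteps=128, channels=3, n_classes=6):
    model = models.Sequential([
        layers.Input(shape=(timesteps, channels)),
        layers.Conv1D(32, kernel_size=16, activation="relu"),  # matched filters
        layers.GlobalMaxPooling1D(),   # peak filter response per template
        layers.Dense(n_classes, activation="softmax"),
    ])
    model.compile(optimizer="adam",
                  loss="sparse_categorical_crossentropy",
                  metrics=["accuracy"])
    return model
```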
This study presents pertinent findings for IoT development across clinical laboratory systems; more rigorous and efficient testing and studies will be essential in the future.}, } @article {pmid36298235, year = {2022}, author = {Velichko, A and Huyut, MT and Belyaev, M and Izotov, Y and Korzun, D}, title = {Machine Learning Sensors for Diagnosis of COVID-19 Disease Using Routine Blood Values for Internet of Things Application.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {20}, pages = {}, pmid = {36298235}, issn = {1424-8220}, support = {22-11-20040//Russian Science Foundation/ ; }, mesh = {Humans ; *Internet of Things ; *COVID-19/diagnosis ; Cholesterol, HDL ; Machine Learning ; Amylases ; Triglycerides ; }, abstract = {Healthcare digitalization requires effective applications of human sensors, whereby various parameters of the human body are instantly monitored in everyday life due to the Internet of Things (IoT). In particular, machine learning (ML) sensors for the prompt diagnosis of COVID-19 are an important option for IoT application in healthcare and ambient assisted living (AAL). Determining a COVID-19 infected status with various diagnostic tests and imaging results is costly and time-consuming. This study provides a fast, reliable and cost-effective alternative tool for the diagnosis of COVID-19 based on the routine blood values (RBVs) measured at admission. The dataset of the study consists of a total of 5296 patients with the same number of negative and positive COVID-19 test results and 51 routine blood values. In this study, 13 popular classifier machine learning models and the LogNNet neural network model were examined. The most successful classifier model in terms of time and accuracy in the detection of the disease was the histogram-based gradient boosting (HGB) (accuracy: 100%, time: 6.39 sec). The HGB classifier identified the 11 most important features (LDL, cholesterol, HDL-C, MCHC, triglyceride, amylase, UA, LDH, CK-MB, ALP and MCH) to detect the disease with 100% accuracy. In addition, the importance of single, double and triple combinations of these features in the diagnosis of the disease was discussed. We propose to use these 11 features and their binary combinations as important biomarkers for ML sensors in the diagnosis of the disease, supporting edge computing on Arduino and cloud IoT service.}, } @article {pmid36298158, year = {2022}, author = {Merone, M and Graziosi, A and Lapadula, V and Petrosino, L and d'Angelis, O and Vollero, L}, title = {A Practical Approach to the Analysis and Optimization of Neural Networks on Embedded Systems.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {20}, pages = {}, pmid = {36298158}, issn = {1424-8220}, mesh = {*Artificial Intelligence ; *Neural Networks, Computer ; Cloud Computing ; Algorithms ; Computers ; }, abstract = {The exponential increase in internet data poses several challenges to cloud systems and data centers, such as scalability, power overheads, network load, and data security. To overcome these limitations, research is focusing on the development of edge computing systems, i.e., systems based on a distributed computing model in which data processing occurs as close as possible to where the data are collected.
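An aside on the routine-blood-values entry above (pmid 36298235): a minimal sketch of a histogram-based gradient boosting classifier of the kind it reports, shown on synthetic stand-in data rather than the study's clinical dataset.

```python
# Illustrative histogram-based gradient boosting classifier of the kind
# used in the routine-blood-values study above, on synthetic stand-in
# data (the real study uses 51 routine blood values from 5296 patients).
from sklearn.datasets import make_classification
from sklearn.ensemble import HistGradientBoostingClassifier
from sklearn.model_selection import train_test_split

X, y = make_classification(n_samples=2000, n_features=51, random_state=0)
X_tr, X_te, y_tr, y_te = train_test_split(X, y, random_state=0)

clf = HistGradientBoostingClassifier(random_state=0).fit(X_tr, y_tr)
print(f"held-out accuracy: {clf.score(X_te, y_te):.3f}")
```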
Edge computing indeed mitigates the limitations of cloud computing by implementing artificial intelligence algorithms directly on embedded devices, enabling low-latency responses without network overhead or high costs and improving solution scalability. Today, hardware improvements make edge devices capable of performing complex computations, such as those required by Deep Neural Networks, albeit with some constraints. Nevertheless, to efficiently implement deep learning algorithms on devices with limited computing power, it is necessary to minimize the production time and to quickly identify, deploy, and, if necessary, optimize the best Neural Network solution. This study focuses on developing a universal method to identify and port the best Neural Network on an edge system, valid regardless of the device, Neural Network, and task typology. The method is based on three steps: a trade-off step to obtain the best Neural Network among the different solutions under investigation; an optimization step to find the best configurations of parameters under different acceleration techniques; and finally, an explainability step using local interpretable model-agnostic explanations (LIME), which provides a global approach to quantify the goodness of the classifier decision criteria. We evaluated several MobileNets on the Fudan Shanghai-Tech dataset to test the proposed approach.}, } @article {pmid36298065, year = {2022}, author = {Torrisi, F and Amato, E and Corradino, C and Mangiagli, S and Del Negro, C}, title = {Characterization of Volcanic Cloud Components Using Machine Learning Techniques and SEVIRI Infrared Images.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {20}, pages = {}, pmid = {36298065}, issn = {1424-8220}, support = {OB.FU. 0867.010//INGV/ ; }, mesh = {Humans ; Atmosphere ; Gases ; Machine Learning ; *Volcanic Eruptions ; }, abstract = {Volcanic explosive eruptions inject several different types of particles and gasses into the atmosphere, giving rise to the formation and propagation of volcanic clouds. These can pose a serious threat to the health of people living near an active volcano and cause damage to air traffic. Many efforts have been devoted to monitoring and characterizing volcanic clouds. Satellite infrared (IR) sensors have been shown to be well suited for volcanic cloud monitoring tasks. Here, a machine learning (ML) approach was developed in Google Earth Engine (GEE) to detect a volcanic cloud and to classify its main components using satellite infrared images. We implemented a supervised support vector machine (SVM) algorithm to segment a combination of thermal infrared (TIR) bands acquired by the geostationary MSG-SEVIRI (Meteosat Second Generation-Spinning Enhanced Visible and Infrared Imager). This ML algorithm was applied to some of the paroxysmal explosive events that occurred at Mt. Etna between 2020 and 2022.
We found that the ML approach using a combination of TIR bands from the geostationary satellite is very efficient, achieving an accuracy of 0.86 and proving able to automatically detect, track and map volcanic ash clouds in near real-time.}, } @article {pmid36294134, year = {2022}, author = {Li, Z}, title = {Forecasting Weekly Dengue Cases by Integrating Google Earth Engine-Based Risk Predictor Generation and Google Colab-Based Deep Learning Modeling in Fortaleza and the Federal District, Brazil.}, journal = {International journal of environmental research and public health}, volume = {19}, number = {20}, pages = {}, pmid = {36294134}, issn = {1660-4601}, support = {42061134019//National Natural Science Foundation of China/ ; QYZDB-SSW-DQC005//Key Research Program of Frontier Sciences of the Chinese Academy of Sciences/ ; E0V00110YZ//Institute of Geographic Sciences and Natural Resources Research, Chinese Academy of Sciences/ ; }, mesh = {Humans ; Brazil/epidemiology ; *Dengue/epidemiology ; *Deep Learning ; Artificial Intelligence ; Search Engine ; Forecasting ; }, abstract = {Efficient and accurate dengue risk prediction is an important basis for dengue prevention and control, which faces challenges such as downloading and processing multi-source data to generate risk predictors and consuming significant time and computational resources to train and validate models locally. In this context, this study proposed a framework for dengue risk prediction by integrating big geospatial data cloud computing based on the Google Earth Engine (GEE) platform and artificial intelligence modeling on the Google Colab platform. It enables defining the epidemiological calendar, delineating the predominant area of dengue transmission in cities, generating the risk-predictor data, and defining multi-date ahead prediction scenarios. We implemented the experiments based on weekly dengue cases during 2013-2020 in the Federal District and Fortaleza, Brazil to evaluate the performance of the proposed framework. Four predictors were considered, including total rainfall (Rsum), mean temperature (Tmean), mean relative humidity (RHmean), and mean normalized difference vegetation index (NDVImean). Three models (i.e., random forest (RF), long-short term memory (LSTM), and LSTM with attention mechanism (LSTM-ATT)), and two modeling scenarios (i.e., modeling with or without dengue cases) were set to implement 1- to 4-week ahead predictions. A total of 24 models were built, and the results showed in general that LSTM and LSTM-ATT models outperformed RF models; modeling could benefit from using historical dengue cases as one of the predictors, and doing so makes the fluctuation of the predicted curve more stable than using climate and environmental factors alone; the attention mechanism could further improve the performance of LSTM models.
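As a concrete illustration of the LSTM setup just described, here is a minimal Keras sketch mapping a window of weekly predictors (rainfall, temperature, humidity, NDVI, and historical cases) to a week-ahead case count; the lookback length and layer width are illustrative assumptions, not the study's tuned configuration.

```python
# Minimal LSTM regressor for week-ahead dengue case prediction (Keras),
# following the modeling setup described above. The lookback window,
# layer width, and feature count are illustrative assumptions.
import tensorflow as tf
from tensorflow.keras import layers, models

def build_forecaster(lookback=8, n_features=5):
    """5 features: Rsum, Tmean, RHmean, NDVImean, historical cases."""
    model = models.Sequential([
        layers.Input(shape=(lookback, n_features)),
        layers.LSTM(32),
        layers.Dense(1),   # predicted weekly case count, k weeks ahead
    ])
    model.compile(optimizer="adam", loss="mse")
    return model
```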
This study provides implications for future dengue risk prediction in terms of the effectiveness of GEE-based big geospatial data processing for risk predictor generation and Google Colab-based risk modeling and presents the benefits of using historical dengue data as one of the input features and the attention mechanism for LSTM modeling.}, } @article {pmid36293656, year = {2022}, author = {Alenoghena, CO and Onumanyi, AJ and Ohize, HO and Adejo, AO and Oligbi, M and Ali, SI and Okoh, SA}, title = {eHealth: A Survey of Architectures, Developments in mHealth, Security Concerns and Solutions.}, journal = {International journal of environmental research and public health}, volume = {19}, number = {20}, pages = {}, pmid = {36293656}, issn = {1660-4601}, support = {TETF/ES/DR\&D-CE/NRF2020/SET1/67/VOL.1.//Tertiary Education Trust Fund, Nigeria/ ; }, mesh = {Humans ; Pandemics ; *COVID-19/epidemiology ; *Telemedicine ; Technology ; }, abstract = {The ramifications of the COVID-19 pandemic have contributed in part to a recent upsurge in the study and development of eHealth systems. Although it is almost impossible to cover all aspects of eHealth in a single discussion, three critical areas have gained traction. These include the need for acceptable eHealth architectures, the development of mobile health (mHealth) technologies, and the need to address eHealth system security concerns. Existing survey articles lack a synthesis of the most recent advancements in the development of architectures, mHealth solutions, and innovative security measures, which are essential components of effective eHealth systems. Consequently, the present article aims at providing an encompassing survey of these three aspects towards the development of successful and efficient eHealth systems. Firstly, we discuss the most recent innovations in eHealth architectures, such as blockchain-, Internet of Things (IoT)-, and cloud-based architectures, focusing on their respective benefits and drawbacks while also providing an overview of how they might be implemented and used. Concerning mHealth and security, we focus on key developments in both areas while discussing other critical topics of importance for eHealth systems. We close with a discussion of the important research challenges and potential future directions as they pertain to architecture, mHealth, and security concerns. This survey gives a comprehensive overview, including the merits and limitations of several possible technologies for the development of eHealth systems. This endeavor offers researchers and developers a quick snapshot of the information necessary during the design and decision-making phases of the eHealth system development lifecycle. Furthermore, we conclude that building a unified architecture for eHealth systems would require combining several existing designs. 
It also points out that there are still a number of problems to be solved, so more research and investment are needed to develop and deploy functional eHealth systems.}, } @article {pmid36280715, year = {2022}, author = {Schubert, PJ and Dorkenwald, S and Januszewski, M and Klimesch, J and Svara, F and Mancu, A and Ahmad, H and Fee, MS and Jain, V and Kornfeld, J}, title = {SyConn2: dense synaptic connectivity inference for volume electron microscopy.}, journal = {Nature methods}, volume = {19}, number = {11}, pages = {1367-1370}, pmid = {36280715}, issn = {1548-7105}, support = {RF1 MH117809/MH/NIMH NIH HHS/United States ; }, mesh = {Microscopy, Electron ; *Connectome ; Synapses ; Neurons ; Brain ; }, abstract = {The ability to acquire ever larger datasets of brain tissue using volume electron microscopy leads to an increasing demand for the automated extraction of connectomic information. We introduce SyConn2, an open-source connectome analysis toolkit, which works with both on-site high-performance compute environments and rentable cloud computing clusters. SyConn2 was tested on connectomic datasets with more than 10 million synapses, provides a web-based visualization interface and makes these data amenable to complex anatomical and neuronal connectivity queries.}, } @article {pmid36275963, year = {2022}, author = {Zhang, Y and Geng, P}, title = {Multi-Task Assignment Method of the Cloud Computing Platform Based on Artificial Intelligence.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {1789490}, pmid = {36275963}, issn = {1687-5273}, mesh = {*Cloud Computing ; *Artificial Intelligence ; Bayes Theorem ; Algorithms ; Big Data ; }, abstract = {To realize load balancing of cloud computing platforms in big data processing, the method of finding the optimal load balancing physical host in the algorithm cycle is adopted at present. This optimal load balancing strategy that overly focuses on the current deployment problem has certain limitations. It will make the system less efficient and the user's waiting time unnecessarily prolonged. This paper proposes a task assignment method for long-term resource load balancing of cloud platforms based on artificial intelligence and big data (TABAI). The maximum posterior probability for each physical host is calculated using Bayesian theory. Euler's formula is used to calculate the similarity between the host with the largest posterior probability and other hosts as a threshold. The hosts are classified according to the threshold to determine the optimal cluster and then form the final set of candidate physical hosts. It improves the resource utilization and external service capability of the cloud platform by combining cluster analysis with Bayes' theorem to achieve global load balancing in the time dimension. The experimental results show that: TABAI has a smaller processing time than the traditional load balancing multi-task assignment method. 
When the time is >600 s, the standard deviation of TABAI decreases to a greater extent, and it has stronger external service capabilities.}, } @article {pmid36274993, year = {2022}, author = {Yentes, JM and Liu, WY and Zhang, K and Markvicka, E and Rennard, SI}, title = {Updated Perspectives on the Role of Biomechanics in COPD: Considerations for the Clinician.}, journal = {International journal of chronic obstructive pulmonary disease}, volume = {17}, number = {}, pages = {2653-2675}, pmid = {36274993}, issn = {1178-2005}, support = {L30 HL129255/HL/NHLBI NIH HHS/United States ; }, mesh = {Humans ; Biomechanical Phenomena ; *Pulmonary Disease, Chronic Obstructive/diagnosis ; Gait/physiology ; Walking ; Walking Speed ; }, abstract = {Patients with chronic obstructive pulmonary disease (COPD) demonstrate extra-pulmonary functional decline such as an increased prevalence of falls. Biomechanics offers insight into functional decline by examining mechanics of abnormal movement patterns. This review discusses biomechanics of functional outcomes, muscle mechanics, and breathing mechanics in patients with COPD as well as future directions and clinical perspectives. Patients with COPD demonstrate changes in their postural sway during quiet standing compared to controls, and these deficits are exacerbated when sensory information (eg, eyes closed) is manipulated. If standing balance is disrupted with a perturbation, patients with COPD are slower to return to baseline and their muscle activity differs from that of controls. When walking, patients with COPD appear to adopt a gait pattern that may increase stability (eg, shorter and wider steps, decreased gait speed) in addition to altered gait variability. Biomechanical muscle mechanics (ie, tension, extensibility, elasticity, and irritability) alterations with COPD are not well documented, with relatively few articles investigating these properties. On the other hand, dyssynchronous motion of the abdomen and rib cage while breathing is well documented in patients with COPD. Newer biomechanical technologies have allowed for estimation of regional, compartmental lung volumes during activity such as exercise, as well as respiratory muscle activation during breathing. Future directions of biomechanical analyses in COPD are trending toward wearable sensors, big data, and cloud computing. Each of these offers unique opportunities as well as challenges. Advanced analytics of sensor data can offer insight into the health of a system by quantifying complexity or fluctuations in patterns of movement, as healthy systems demonstrate flexibility and are thus adaptable to changing conditions. Biomechanics may offer clinical utility in prediction of 30-day readmissions, identifying disease severity, and patient monitoring. Biomechanics is complementary to other assessments, capturing what patients do, as well as their capability.}, } @article {pmid36274815, year = {2023}, author = {Bonino da Silva Santos, LO and Ferreira Pires, L and Graciano Martinez, V and Rebelo Moreira, JL and Silva Souza Guizzardi, R}, title = {Personal Health Train Architecture with Dynamic Cloud Staging.}, journal = {SN computer science}, volume = {4}, number = {1}, pages = {14}, pmid = {36274815}, issn = {2661-8907}, abstract = {Scientific advances, especially in the healthcare domain, can be accelerated by making data available for analysis.
However, in traditional data analysis systems, data need to be moved to a central processing unit that performs analyses, which may be undesirable, e.g. due to privacy regulations in case these data contain personal information. This paper discusses the Personal Health Train (PHT) approach, in which data processing is brought to the (personal health) data rather than the other way around, allowing access to (private) data to be controlled and ethical and legal concerns to be observed. This paper introduces the PHT architecture and discusses the data staging solution that allows processing to be delegated to components spawned in a private cloud environment in case the (health) organisation hosting the data has limited resources to execute the required processing. This paper shows the feasibility and suitability of the solution with a relatively simple, yet representative, case study of data analysis of Covid-19 infections, which is performed by components that are created on demand and run in the Amazon Web Services platform. This paper also shows that the performance of our solution is acceptable, and that our solution is scalable. This paper demonstrates that the PHT approach enables data analysis with controlled access, preserving privacy and complying with regulations such as GDPR, while the solution is deployed in a private cloud environment.}, } @article {pmid36269974, year = {2022}, author = {Proctor, T and Seritan, S and Rudinger, K and Nielsen, E and Blume-Kohout, R and Young, K}, title = {Scalable Randomized Benchmarking of Quantum Computers Using Mirror Circuits.}, journal = {Physical review letters}, volume = {129}, number = {15}, pages = {150502}, doi = {10.1103/PhysRevLett.129.150502}, pmid = {36269974}, issn = {1079-7114}, abstract = {The performance of quantum gates is often assessed using some form of randomized benchmarking. However, the existing methods become infeasible for more than approximately five qubits. Here we show how to use a simple and customizable class of circuits (randomized mirror circuits) to perform scalable, robust, and flexible randomized benchmarking of Clifford gates. We show that this technique approximately estimates the infidelity of an average many-qubit logic layer, and we use simulations of up to 225 qubits with physically realistic error rates in the range 0.1%-1% to demonstrate its scalability. We then use up to 16 physical qubits of a cloud quantum computing platform to demonstrate that our technique can reveal and quantify crosstalk errors in many-qubit circuits.}, } @article {pmid36269885, year = {2023}, author = {Matar, A and Hansson, M and Slokenberga, S and Panagiotopoulos, A and Chassang, G and Tzortzatou, O and Pormeister, K and Uhlin, E and Cardone, A and Beauvais, M}, title = {A proposal for an international Code of Conduct for data sharing in genomics.}, journal = {Developing world bioethics}, volume = {23}, number = {4}, pages = {344-357}, doi = {10.1111/dewb.12381}, pmid = {36269885}, issn = {1471-8847}, support = {741716//Horizon 2020 Framework Programme/ ; }, mesh = {Humans ; *Genomics ; *Information Dissemination ; Research Personnel ; }, abstract = {As genomic research becomes commonplace across the world, there is an increased need to coordinate practices among researchers, especially with regard to data sharing. One such way is an international code of conduct.
In September 2020, an expert panel consisting of representatives from various fields convened to discuss a draft proposal formed via a synthesis of existing professional codes and other recommendations. This article presents an overview and analysis of the main issues related to international genomic research that were discussed by the expert panel, along with the results of the discussion and follow-up responses by the experts. As a result, the article presents as an annex a proposal for an international code of conduct for data sharing in genomics that is meant to establish best practices.}, } @article {pmid36268157, year = {2022}, author = {Asif, RN and Abbas, S and Khan, MA and Atta-Ur-Rahman, and Sultan, K and Mahmud, M and Mosavi, A}, title = {Development and Validation of Embedded Device for Electrocardiogram Arrhythmia Empowered with Transfer Learning.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {5054641}, pmid = {36268157}, issn = {1687-5273}, mesh = {Humans ; *Electrocardiography/methods ; *Arrhythmias, Cardiac/diagnosis ; Cloud Computing ; Machine Learning ; Software ; }, abstract = {With the emergence of the Internet of Things (IoT), the investigation of different diseases in healthcare has improved, and cloud computing has helped to centralize data and make patient records accessible throughout the world. In this context, the electrocardiogram (ECG) is used to diagnose heart diseases and abnormalities. Machine learning techniques have been used previously but are feature-based and not as accurate as transfer learning; we therefore propose the development and validation of an embedded device for ECG arrhythmia empowered with transfer learning (DVEEA-TL). This model combines hardware, software, and two datasets that are augmented and fused, and it achieves higher accuracy than previous work and research. In the proposed model, a new dataset is created by combining a Kaggle dataset with real-time healthy and unhealthy datasets, and the AlexNet transfer learning approach is then applied to obtain more accurate readings of ECG signals. In this research, the DVEEA-TL model diagnoses heart abnormality with accuracies of 99.9% and 99.8% in the training and validation stages, respectively, which is a better and more reliable result than previous research in this field.}, } @article {pmid36268145, year = {2022}, author = {Han, Z and Li, F and Wang, G}, title = {Financial Data Mining Model Based on K-Truss Community Query Model and Artificial Intelligence.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {9467623}, pmid = {36268145}, issn = {1687-5273}, mesh = {Humans ; *Artificial Intelligence ; *Data Mining ; Big Data ; Cloud Computing ; Algorithms ; }, abstract = {With the continuous development of Internet technology and related industries, emerging technologies such as big data and cloud computing have gradually integrated into and influenced social life. These technologies have, to a large extent, revolutionized the way people produce and live and have provided a great deal of convenience for daily life. With the popularity of these technologies, information and data have also begun to explode.
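The DVEEA-TL entry above replaces feature-based classifiers with AlexNet transfer learning on ECG data. A minimal sketch of that kind of fine-tuning in PyTorch follows; the two-class (healthy/arrhythmia) head, the frozen-feature policy, and the input assumptions are illustrative, not the paper's published configuration.

    import torch.nn as nn
    from torchvision import models

    # Load AlexNet with ImageNet weights and keep its convolutional
    # features fixed, so only the classifier adapts to the ECG images.
    model = models.alexnet(weights=models.AlexNet_Weights.DEFAULT)
    for p in model.features.parameters():
        p.requires_grad = False

    # Swap the 1000-class ImageNet head for a 2-class ECG head
    # (healthy vs. arrhythmia; the class count is an assumption).
    model.classifier[6] = nn.Linear(4096, 2)

    # Training would then proceed with a standard cross-entropy loop over
    # ECG recordings rendered as 224x224 RGB tensors (assumed preprocessing).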
When an image storage system is used to process this information, an image contains countless pixels, and these pixels are interconnected to form the entire image. Communities in real life are like these pixels: on the Internet, communities are composed of interconnected parts. Open problems remain in fields such as image modeling, for example the problem of recognition rate, and the study of community structure raises many more, attracting more and more researchers. However, research on community query problems started late and has developed relatively slowly, so designing an excellent community query algorithm is a problem that urgently needs to be solved. With this goal, and building on previous research results, we conduct in-depth discussions of community query algorithms and hope that our research results can be applied to real life.}, } @article {pmid36267554, year = {2022}, author = {Jia, Z}, title = {Garden Landscape Design Method in Public Health Urban Planning Based on Big Data Analysis Technology.}, journal = {Journal of environmental and public health}, volume = {2022}, number = {}, pages = {2721247}, pmid = {36267554}, issn = {1687-9813}, mesh = {*Big Data ; *City Planning ; Gardens ; Public Health ; Data Analysis ; Technology ; }, abstract = {Aiming at the goal of high-quality development of the landscape architecture industry, we should actively promote the development and integration of digital, networked, and intelligent technologies and promote the intelligent and diversified development of the landscape architecture industry. Due to the limitations of drawing design technology and construction methods, traditional landscape architecture construction cannot truly understand public demands, and construction schemes rely on the experience and subjective aesthetics of professionals, resulting in an improper connection between design and construction. At present, under the guidance of the national strategy and against the background of the rapid development of digital technologies such as 5G, big data, cloud computing, the Internet of Things, and digital twins, the deep integration of landscape architecture construction and digital technology has transformed the production mode of landscape architecture construction. Abundant professional data and convenient information processing platforms enable landscape planners, designers, and builders to evaluate the whole life cycle of a project more scientifically and objectively and to digitalize the whole process of investigation, analysis, design, construction, operation, and maintenance. For the landscape architecture industry, the significance of digital technology is not only to change production tools but also to update environmental awareness, design responses, and construction methods, which allows landscape architecture planning and design to achieve an organic combination of the qualitative and the quantitative and makes the landscape architecture discipline more scientific and rational. In this paper, a new method combining grey relational degree with machine learning uses big data information in landscape design to provide new guidance for traditional landscape planning, and it has achieved very good results.
The article analyzes the guidance of landscape architecture design under the big data in China and provides valuable reference for promoting the construction of landscape architecture in China.}, } @article {pmid36264891, year = {2022}, author = {Su, J and Su, K and Wang, S}, title = {Evaluation of digital economy development level based on multi-attribute decision theory.}, journal = {PloS one}, volume = {17}, number = {10}, pages = {e0270859}, pmid = {36264891}, issn = {1932-6203}, mesh = {Pregnancy ; Humans ; Female ; *Economic Development ; Artificial Intelligence ; Pandemics ; *COVID-19/epidemiology ; Decision Theory ; China ; }, abstract = {The maturity and commercialization of emerging digital technologies represented by artificial intelligence, cloud computing, block chain and virtual reality are giving birth to a new and higher economic form, that is, digital economy. Digital economy is different from the traditional industrial economy. It is clean, efficient, green and recyclable. It represents and promotes the future direction of global economic development, especially in the context of the sudden COVID-19 pandemic as a continuing disaster. Therefore, it is essential to establish the comprehensive evaluation model of digital economy development scientifically and reasonably. In this paper, first on the basis of literature analysis, the relevant indicators of digital economy development are collected manually and then screened by the grey dynamic clustering and rough set reduction theory. The evaluation index system of digital economy development is constructed from four dimensions: digital innovation impetus support, digital infrastructure construction support, national economic environment and digital policy guarantee, digital integration and application. Next the subjective weight and objective weight are calculated by the group FAHP method, entropy method and improved CRITIC method, and the combined weight is integrated with the thought of maximum variance. The grey correlation analysis and improved VIKOR model are combined to systematically evaluate the digital economy development level of 31 provinces and cities in China from 2013 to 2019. The results of empirical analysis show that the overall development of China's digital economy shows a trend of superposition and rise, and the development of digital economy in the four major economic zones is unbalanced. Finally, we put forward targeted opinions on the construction of China's provincial digital economy.}, } @article {pmid36264608, year = {2022}, author = {Moya-Galé, G and Walsh, SJ and Goudarzi, A}, title = {Automatic Assessment of Intelligibility in Noise in Parkinson Disease: Validation Study.}, journal = {Journal of medical Internet research}, volume = {24}, number = {10}, pages = {e40567}, pmid = {36264608}, issn = {1438-8871}, mesh = {Humans ; Dysarthria/etiology/complications ; *Parkinson Disease/complications ; Artificial Intelligence ; Speech Intelligibility ; *Speech Perception ; }, abstract = {BACKGROUND: Most individuals with Parkinson disease (PD) experience a degradation in their speech intelligibility. Research on the use of automatic speech recognition (ASR) to assess intelligibility is still sparse, especially when trying to replicate communication challenges in real-life conditions (ie, noisy backgrounds). Developing technologies to automatically measure intelligibility in noise can ultimately assist patients in self-managing their voice changes due to the disease.

OBJECTIVE: The goal of this study was to pilot-test and validate the use of a customized web-based app to assess speech intelligibility in noise in individuals with dysarthria associated with PD.

METHODS: In total, 20 individuals with dysarthria associated with PD and 20 healthy controls (HCs) recorded a set of sentences using their phones. The Google Cloud ASR API was used to automatically transcribe the speakers' sentences. An algorithm was created to embed speakers' sentences in +6-dB signal-to-noise multitalker babble. Results from ASR performance were compared to those from 30 listeners who orthographically transcribed the same set of sentences. Data were reduced into a single event, defined as a success if the artificial intelligence (AI) system transcribed a random speaker or sentence as well or better than the average of 3 randomly chosen human listeners. These data were further analyzed by logistic regression to assess whether AI success differed by speaker group (HCs or speakers with dysarthria) or was affected by sentence length. A discriminant analysis was conducted on the human listener data and AI transcriber data independently to compare the ability of each data set to discriminate between HCs and speakers with dysarthria.
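The methods above embed each speaker's sentences in multitalker babble at a +6-dB signal-to-noise ratio before transcription. A minimal sketch of that mixing step, assuming 1-D float arrays at a common sampling rate (the function name and scaling rule are illustrative, not the study's code):

    import numpy as np

    def mix_at_snr(speech: np.ndarray, babble: np.ndarray, snr_db: float = 6.0) -> np.ndarray:
        # Tile or trim the babble track to match the speech length.
        babble = np.resize(babble, len(speech))
        p_speech = np.mean(speech ** 2)
        p_babble = np.mean(babble ** 2)
        # Scale the babble so 10*log10(p_speech / p_noise) equals snr_db.
        scale = np.sqrt(p_speech / (p_babble * 10 ** (snr_db / 10)))
        return speech + scale * babble

The mixed signal could then be submitted to a cloud ASR service and the returned transcript scored against the human listener transcriptions.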

RESULTS: The data analysis indicated a 0.8 probability (95% CI 0.65-0.91) that AI performance would be as good or better than the average human listener. AI transcriber success probability was not found to be dependent on speaker group. AI transcriber success was found to decrease with sentence length, losing an estimated 0.03 probability of transcribing as well as the average human listener for each word increase in sentence length. The AI transcriber data were found to offer the same discrimination of speakers into categories (HCs and speakers with dysarthria) as the human listener data.

CONCLUSIONS: ASR has the potential to assess intelligibility in noise in speakers with dysarthria associated with PD. Our results hold promise for the use of AI with this clinical population, although a full range of speech severity needs to be evaluated in future work, as well as the effect of different speaking tasks on ASR.}, } @article {pmid36259975, year = {2023}, author = {}, title = {Understanding enterprise data warehouses to support clinical and translational research: enterprise information technology relationships, data governance, workforce, and cloud computing.}, journal = {Journal of the American Medical Informatics Association : JAMIA}, volume = {30}, number = {2}, pages = {407}, doi = {10.1093/jamia/ocac206}, pmid = {36259975}, issn = {1527-974X}, } @article {pmid36259009, year = {2022}, author = {Gendia, A}, title = {Cloud Based AI-Driven Video Analytics (CAVs) in Laparoscopic Surgery: A Step Closer to a Virtual Portfolio.}, journal = {Cureus}, volume = {14}, number = {9}, pages = {e29087}, pmid = {36259009}, issn = {2168-8184}, abstract = {AIMS: To outline the use of cloud-based artificial intelligence (AI)-driven video analytics (CAVs) in minimally invasive surgery and to propose their potential as a virtual portfolio for trainee and established surgeons. Methods: An independent online demonstration was requested from three platforms, namely Theator (Palo Alto, California, USA), Touch Surgery™ (Medtronic, London, England, UK), and C-SATS® (Seattle, Washington, USA). The assessed domains were online and app-based accessibility, the ability for timely trainee feedback, and AI integration for operation-specific steps and critical views.

RESULTS: The CAVs enable users to record surgeries with the advantage of limitless video storage through the cloud and smart integration into theatre settings. This medium of communication and sharing can be used to view surgeries and review trainee videos, with the ability to provide feedback. Theator and C-SATS® provide their users with surgical skills scoring systems with customizable options that can be used to provide structured feedback to trainees. Additionally, AI plays an important role in all three platforms by providing time-based analysis of steps and highlighting critical milestones. Conclusion: Cloud-based AI-driven video analytics is an emerging new technology that enables users to store, analyze, and review videos. This technology has the potential to improve training, governance, and standardization procedures. Moreover, with the future adaptation of the technology, CAVs can be integrated into the trainees' portfolios as part of their virtual curriculum. This can enable a structured assessment of a surgeon's progression and degree of experience throughout their surgical career.}, } @article {pmid36258393, year = {2022}, author = {Yamamoto, Y and Shimobaba, T and Ito, T}, title = {HORN-9: Special-purpose computer for electroholography with the Hilbert transform.}, journal = {Optics express}, volume = {30}, number = {21}, pages = {38115-38127}, doi = {10.1364/OE.471720}, pmid = {36258393}, issn = {1094-4087}, abstract = {Holography is a technology that uses light interference and diffraction to record and reproduce three-dimensional (3D) information. Using computers, holographic 3D scenes (electroholography) have been widely studied. Nevertheless, its practical application requires enormous computing power, and current computers have limitations in real-time processing. In this study, we show that holographic reconstruction (HORN)-9, a special-purpose computer for electroholography with the Hilbert transform, can compute a 1,920 × 1,080-pixel computer-generated hologram from a point cloud of 65,000 points in 0.030 s (33 fps) on a single card. This performance is 8, 7, and 170 times more efficient than the previously developed HORN-8, a graphics processing unit, and a central processing unit (CPU), respectively. We also demonstrated the real-time processing and display of 400,000 points on multiple HORN-9s, achieving an acceleration of 600 times with four HORN-9 units compared with a single CPU.}, } @article {pmid36255917, year = {2022}, author = {Houskeeper, HF and Hooker, SB and Cavanaugh, KC}, title = {Spectrally simplified approach for leveraging legacy geostationary oceanic observations.}, journal = {Applied optics}, volume = {61}, number = {27}, pages = {7966-7977}, doi = {10.1364/AO.465491}, pmid = {36255917}, issn = {1539-4522}, mesh = {*Environmental Monitoring/methods ; *Ecosystem ; Satellite Imagery ; Oceans and Seas ; Water ; }, abstract = {The use of multispectral geostationary satellites to study aquatic ecosystems improves the temporal frequency of observations and mitigates cloud obstruction, but no operational capability presently exists for the coastal and inland waters of the United States. The Advanced Baseline Imager (ABI) on the current iteration of the Geostationary Operational Environmental Satellites, termed the R Series (GOES-R), however, provides sub-hourly imagery and the opportunity to overcome this deficit and to leverage a large repository of existing GOES-R aquatic observations.
The fulfillment of this opportunity is assessed herein using a spectrally simplified, two-channel aquatic algorithm consistent with ABI wave bands to estimate the diffuse attenuation coefficient for photosynthetically available radiation, Kd(PAR). First, an in situ ABI dataset was synthesized using a globally representative dataset of above- and in-water radiometric data products. Values of Kd(PAR) were estimated by fitting the ratio of the shortest and longest visible wave bands from the in situ ABI dataset to coincident, in situ Kd(PAR) data products. The algorithm was evaluated based on an iterative cross-validation analysis in which 80% of the dataset was randomly partitioned for fitting and the remaining 20% was used for validation. The iteration producing the median coefficient of determination (R[2]) value (0.88) resulted in a root mean square difference of 0.319 m[-1], or 8.5% of the range in the validation dataset. Second, coincident mid-day images of central and southern California from ABI and from the Moderate Resolution Imaging Spectroradiometer (MODIS) were compared using Google Earth Engine (GEE). GEE default ABI reflectance values were adjusted based on a near infrared signal. Matchups between the ABI and MODIS imagery indicated similar spatial variability (R[2]=0.60) between ABI adjusted blue-to-red reflectance ratio values and MODIS default diffuse attenuation coefficient for spectral downward irradiance at 490 nm, Kd(490), values. This work demonstrates that if an operational capability to provide ABI aquatic data products was realized, the spectral configuration of ABI would potentially support a sub-hourly, visible aquatic data product that is applicable to water-mass tracing and physical oceanography research.}, } @article {pmid36254227, year = {2022}, author = {Song, L and Wang, H and Shi, Z}, title = {A Literature Review Research on Monitoring Conditions of Mechanical Equipment Based on Edge Computing.}, journal = {Applied bionics and biomechanics}, volume = {2022}, number = {}, pages = {9489306}, pmid = {36254227}, issn = {1176-2322}, abstract = {The motivation of this research is to review all methods used to compress the data collected when monitoring the condition of equipment within an edge computing framework. Because a large amount of signal data is collected when monitoring the condition of mechanical equipment (signals from running machines are continuously transmitted for processing), compressed data should be handled effectively. However, this process occupies resources, since data transmission requires the allocation of a large capacity. To resolve this problem, this article examines the monitoring of equipment conditions based on edge computing. First, the signal is pre-processed by edge computing so that fault characteristics can be identified quickly. Second, signals with difficult-to-identify fault characteristics need to be compressed to save transmission resources. Then, the different types of signal data collected on mechanical equipment conditions are compressed by various compression methods and uploaded to the cloud. Finally, the cloud platform, which has powerful processing capability, processes the data to improve the volume of data transmission. By examining and analyzing the monitoring conditions and signal compression methods of mechanical equipment, this review elaborates future development trends to provide references and ideas for contemporary research on data monitoring and data compression algorithms.
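The GOES-R entry above fits a blue-to-red band ratio to in situ Kd(PAR) and validates on a random 20% split. A minimal sketch of that fit/validate loop on synthetic stand-in data (the log-log linear form, coefficients, and array names are illustrative assumptions):

    import numpy as np
    from sklearn.linear_model import LinearRegression
    from sklearn.model_selection import train_test_split

    rng = np.random.default_rng(0)
    # Synthetic stand-ins: log10 band ratio and coincident log10 Kd(PAR).
    log_ratio = rng.uniform(-0.5, 0.5, 500).reshape(-1, 1)
    log_kd = -1.2 * log_ratio.ravel() - 0.8 + rng.normal(0, 0.05, 500)

    # 80% of the data fits the model; the held-out 20% validates it.
    X_fit, X_val, y_fit, y_val = train_test_split(
        log_ratio, log_kd, test_size=0.2, random_state=1)
    model = LinearRegression().fit(X_fit, y_fit)
    print("validation R^2:", round(model.score(X_val, y_val), 3))

Repeating the random partition many times and keeping the iteration with the median R^2, as the entry describes, would simply wrap this in a loop over random seeds.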
Consequently, the manuscript presents different compression methods in detail and clarifies the data compression methods used for the signal compression of equipment based on edge computing.}, } @article {pmid36253343, year = {2022}, author = {Kobayashi, K and Yoshida, H and Tanjo, T and Aida, K}, title = {Cloud service checklist for academic communities and customization for genome medical research.}, journal = {Human genome variation}, volume = {9}, number = {1}, pages = {36}, pmid = {36253343}, issn = {2054-345X}, support = {JP21km0405501//Japan Agency for Medical Research and Development (AMED)/ ; JP21km0405501//Japan Agency for Medical Research and Development (AMED)/ ; JP21km0405501//Japan Agency for Medical Research and Development (AMED)/ ; JP21km0405501//Japan Agency for Medical Research and Development (AMED)/ ; }, abstract = {In this paper, we present a cloud service checklist designed to help IT administrators or researchers in academic organizations select the most suitable cloud services. This checklist, which comprises items that we believe IT administrators or researchers in academic organizations should consider when they adopt cloud services, comprehensively covers the issues related to a variety of cloud services, including security, functionality, performance, and law. In response to the increasing demands for storage and computing resources in genome medical science communities, various guidelines for using resources operated by external organizations, such as cloud services, have been published by different academic funding agencies and the Japanese government. However, it is sometimes difficult to identify the checklist items that satisfy the genome medical science community's guidelines, and some of these requirements are not included in the existing checklists. This issue provided our motivation for creating a cloud service checklist customized for genome medical research communities. The resulting customized checklist is designed to help researchers easily find information about the cloud services that satisfy the guidelines in genome medical science communities. Additionally, we explore whether many cloud service providers satisfy the requirements or checklist items in the cloud service checklist for genome medical research by evaluating their survey responses.}, } @article {pmid36248925, year = {2022}, author = {Bu, H and Xia, J and Wu, Q and Chen, L}, title = {Relationship Discovery and Hierarchical Embedding for Web Service Quality Prediction.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {9240843}, pmid = {36248925}, issn = {1687-5273}, mesh = {*Cloud Computing ; *Internet ; Research Design ; }, abstract = {Web Services Quality Prediction has become a popular research theme in Cloud Computing and the Internet of Things. Graph Convolutional Network (GCN)-based methods are more efficient by aggregating feature information from the local graph neighborhood. Despite the fact that these prior works have demonstrated better prediction performance, they are still challenged as follows: (1) first, the user-service bipartite graph is essentially a heterogeneous graph that contains four kinds of relationships. Previous GCN-based models have only focused on using some of these relationships. Therefore, how to fully mine and use the above relationships is critical to improving the prediction accuracy. 
(2) After the embedding is obtained from the GCNs, the commonly used similarity calculation methods for downstream prediction need to traverse the data one by one, which is time-consuming. To address these challenges, this work proposes a novel relationship discovery and hierarchical embedding method based on GCNs (named as RDHE), which designs a dual mechanism to represent services and users, respectively, designs a new community discovery method and a fast similarity calculation process, which can fully mine and utilize the relationships in the graph. The results of the experiment on the real data set show that this method greatly improved the accuracy of the web service quality prediction.}, } @article {pmid36248269, year = {2022}, author = {Mondal, P and Dutta, T and Qadir, A and Sharma, S}, title = {Radar and optical remote sensing for near real-time assessments of cyclone impacts on coastal ecosystems.}, journal = {Remote sensing in ecology and conservation}, volume = {8}, number = {4}, pages = {506-520}, pmid = {36248269}, issn = {2056-3485}, abstract = {Rapid impact assessment of cyclones on coastal ecosystems is critical for timely rescue and rehabilitation operations in highly human-dominated landscapes. Such assessments should also include damage assessments of vegetation for restoration planning in impacted natural landscapes. Our objective is to develop a remote sensing-based approach combining satellite data derived from optical (Sentinel-2), radar (Sentinel-1), and LiDAR (Global Ecosystem Dynamics Investigation) platforms for rapid assessment of post-cyclone inundation in non-forested areas and vegetation damage in a primarily forested ecosystem. We apply this multi-scalar approach for assessing damages caused by the cyclone Amphan that hit coastal India and Bangladesh in May 2020, severely flooding several districts in the two countries, and causing destruction to the Sundarban mangrove forests. Our analysis shows that at least 6821 sq. km. land across the 39 study districts was inundated even after 10 days after the cyclone. We further calculated the change in forest greenness as the difference in normalized difference vegetation index (NDVI) pre- and post-cyclone. Our findings indicate a <0.2 unit decline in NDVI in 3.45 sq. km. of the forest. Rapid assessment of post-cyclone damage in mangroves is challenging due to limited navigability of waterways, but critical for planning of mitigation and recovery measures. We demonstrate the utility of Otsu method, an automated statistical approach of the Google Earth Engine platform to identify inundated areas within days after a cyclone. Our radar-based inundation analysis advances current practices because it requires minimal user inputs, and is effective in the presence of high cloud cover. Such rapid assessment, when complemented with detailed information on species and vegetation composition, can inform appropriate restoration efforts in severely impacted regions and help decision makers efficiently manage resources for recovery and aid relief. 
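The cyclone entry above quantifies vegetation damage as the pre/post-cyclone change in NDVI. A minimal sketch of that difference computation on reflectance arrays (the band arrays and decline threshold are illustrative; the study itself works on Sentinel-2 imagery in Google Earth Engine):

    import numpy as np

    def ndvi(nir: np.ndarray, red: np.ndarray) -> np.ndarray:
        # Normalized difference vegetation index; epsilon avoids divide-by-zero.
        return (nir - red) / (nir + red + 1e-9)

    def ndvi_decline(nir_pre, red_pre, nir_post, red_post, min_drop=0.1):
        # Change in greenness: post-cyclone NDVI minus pre-cyclone NDVI.
        delta = ndvi(nir_post, red_post) - ndvi(nir_pre, red_pre)
        # Mask of pixels whose NDVI fell by more than min_drop units.
        return delta, delta < -min_drop

Summing the mask and multiplying by the per-pixel area would give the damaged area in square kilometres.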
We provide the datasets from this study on an open platform to aid in future research and planning endeavors.}, } @article {pmid36247859, year = {2022}, author = {Saba Raoof, S and Durai, MAS}, title = {A Comprehensive Review on Smart Health Care: Applications, Paradigms, and Challenges with Case Studies.}, journal = {Contrast media & molecular imaging}, volume = {2022}, number = {}, pages = {4822235}, pmid = {36247859}, issn = {1555-4317}, mesh = {Delivery of Health Care ; Humans ; *Internet of Things ; Quality of Life ; *Telemedicine/methods ; }, abstract = {The growth and advancement of Deep Learning (DL) and the Internet of Things (IoT) are making their way through the modern world by integrating various technologies in distinct fields, viz., agriculture, manufacturing, energy, transportation, supply chains, cities, healthcare, and so on. Researchers have identified the feasibility of integrating deep learning, cloud, and IoT to enhance overall automation, where IoT may extend its application area by utilizing cloud services, and the cloud can likewise extend its applications through data acquired by IoT devices such as sensors, with deep learning for disease detection and diagnosis. This study summarizes the various techniques utilized in smart healthcare, i.e., deep learning, cloud-based IoT applications in smart healthcare, and fog computing in smart healthcare, together with the challenges and issues faced by smart healthcare. It presents a wider scope, as it is not intended for a particular application such as patient monitoring, disease detection, or diagnosis, and the technologies used for developing these smart systems are outlined. Smart health improves quality of life. Convenient and comfortable living is made possible by the services provided by smart healthcare systems (SHSs). Since healthcare is a massive area with enormous data and a broad spectrum of diseases associated with different organs, immense research can be done to overcome the drawbacks of traditional healthcare methods. Deep learning with IoT can effectively be applied in the healthcare sector to automate diagnosis and treatment remotely, even in rural areas. Applications may include disease prevention and diagnosis, fitness and patient monitoring, food monitoring, mobile health, telemedicine, emergency systems, assisted living, self-management of chronic diseases, and so on.}, } @article {pmid36246518, year = {2022}, author = {Coelho, R and Braga, R and David, JMN and Stroele, V and Campos, F and Dantas, M}, title = {A Blockchain-Based Architecture for Trust in Collaborative Scientific Experimentation.}, journal = {Journal of grid computing}, volume = {20}, number = {4}, pages = {35}, pmid = {36246518}, issn = {1572-9184}, abstract = {In scientific collaboration, data sharing, the exchange of ideas and results are essential to knowledge construction and the development of science. Hence, we must guarantee interoperability, privacy, traceability (reinforcing transparency), and trust. Provenance has been widely recognized for providing a history of the steps taken in scientific experiments. Consequently, we must support traceability, assisting in the reproducibility of scientific results. One of the technologies that can enhance trust in collaborative scientific experimentation is blockchain. This work proposes an architecture, named BlockFlow, based on blockchain, provenance, and cloud infrastructure to bring trust and traceability to the execution of collaborative scientific experiments.
The proposed architecture is implemented on Hyperledger, and a scenario involving the genomic sequencing of the SARS-CoV-2 coronavirus is used to evaluate the architecture, discussing the benefits of providing traceability and trust in collaborative scientific experimentation. Furthermore, the architecture addresses the heterogeneity of shared data, facilitating the interpretation and analysis of such data by geographically distributed researchers. Through a blockchain-based architecture that provides support for provenance, we can enhance data sharing, traceability, and trust in collaborative scientific experiments.}, } @article {pmid36240003, year = {2022}, author = {Kang, G and Kim, YG}, title = {Secure Collaborative Platform for Health Care Research in an Open Environment: Perspective on Accountability in Access Control.}, journal = {Journal of medical Internet research}, volume = {24}, number = {10}, pages = {e37978}, pmid = {36240003}, issn = {1438-8871}, mesh = {*Blockchain ; *Computer Security ; Health Services Research ; Humans ; Privacy ; Social Responsibility ; }, abstract = {BACKGROUND: With the recent use of IT in health care, a variety of eHealth data are increasingly being collected and stored by national health agencies. As these eHealth data can advance the modern health care system and make it smarter, many researchers want to use these data in their studies. However, using eHealth data brings about privacy and security concerns. The analytical environment that supports health care research must also consider many requirements. For these reasons, countries generally provide research platforms for health care, but some data providers (eg, patients) are still concerned about the security and privacy of their eHealth data. Thus, a more secure platform for health care research that guarantees the utility of eHealth data while focusing on its security and privacy is needed.

OBJECTIVE: This study aims to implement a research platform for health care called the health care big data platform (HBDP), which is more secure than previous health care research platforms. The HBDP uses attribute-based encryption to achieve fine-grained access control and encryption of stored eHealth data in an open environment. Moreover, in the HBDP, platform administrators can perform the appropriate follow-up (eg, block illegal users) and monitoring through a private blockchain. In other words, the HBDP supports accountability in access control.

METHODS: We first identified potential security threats in the health care domain. We then defined the security requirements to minimize the identified threats. In particular, the requirements were defined based on the security solutions used in existing health care research platforms. We then proposed the HBDP, which meets defined security requirements (ie, access control, encryption of stored eHealth data, and accountability). Finally, we implemented the HBDP to prove its feasibility.

RESULTS: This study carried out case studies for illegal user detection via the implemented HBDP based on specific scenarios related to the threats. As a result, the platform detected illegal users appropriately via the security agent. Furthermore, in the empirical evaluation of massive data encryption (eg, 100,000 rows with 3 sensitive columns within 46 columns) for column-level encryption, full encryption after column-level encryption, and full decryption including column-level decryption, our approach achieved approximately 3 minutes, 1 minute, and 9 minutes, respectively. In the blockchain, average latencies and throughputs in 1Org with 2Peers reached approximately 18 seconds and 49 transactions per second (TPS) in read mode and approximately 4 seconds and 120 TPS in write mode in 300 TPS.
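The results above time column-level encryption of a few sensitive columns within a wider table. As a minimal illustration of that idea, the sketch below uses Fernet symmetric encryption from the Python cryptography package as a stand-in for the platform's attribute-based scheme; the column names and row layout are hypothetical:

    from cryptography.fernet import Fernet

    key = Fernet.generate_key()
    f = Fernet(key)

    row = {"patient_id": "P-001", "diagnosis": "I21.9", "age": "63"}
    sensitive = {"diagnosis", "age"}  # columns designated as sensitive

    # Encrypt only the sensitive columns, leaving the rest in plaintext.
    encrypted_row = {c: (f.encrypt(v.encode()).decode() if c in sensitive else v)
                     for c, v in row.items()}

    # A consumer holding the key can recover individual columns.
    print(f.decrypt(encrypted_row["age"].encode()).decode())  # "63"

In the platform described above, attribute-based encryption would replace the shared key, so decryption succeeds only for users whose attributes satisfy the access policy.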

CONCLUSIONS: The HBDP enables fine-grained access control and secure storage of eHealth data via attribute-based encryption cryptography. It also provides nonrepudiation and accountability through the blockchain. Therefore, we consider that our proposal provides a sufficiently secure environment for the use of eHealth data in health care research.}, } @article {pmid36237741, year = {2022}, author = {Konstantinou, C and Xanthopoulos, A and Tsaras, K and Skoularigis, J and Triposkiadis, F and Papagiannis, D}, title = {Vaccination Coverage Against Human Papillomavirus in Female Students in Cyprus.}, journal = {Cureus}, volume = {14}, number = {9}, pages = {e28936}, pmid = {36237741}, issn = {2168-8184}, abstract = {Background Human papillomavirus (HPV) has been associated with the development of several cancers and cardiovascular diseases in females. Nevertheless, there is still poor data on vaccination coverage against HPV in several countries, including Cyprus. The main target of the present research was to assess the vaccination status of female students in Cyprus. Methodology An online survey was conducted via a cloud-based short questionnaire on Google Forms. Students with a known email address were initially invited via email to complete the survey. The questionnaire was distributed to 340 students, aged 18-49 years old, who lived in Cyprus (60% response rate). Results The total vaccination coverage was 38.1%. The mean age of participants was 23.5 (±6.5) years. The major reason for non-vaccination was the belief that participants were not at risk of serious illness from HPV infection (22%), followed by the reported lack of time to get vaccinated (16%) and inertia (13%). The students who had information about the safety of HPV vaccines from electronic sources of information (television, websites, and blogs) had lower vaccination coverage compared to those who had received information from alternative sources (primary health centers, family doctors, or obstetricians) (relative risk (RR) = 1.923, 95% confidence interval (CI) = 0.9669-3.825; p = 0.033). No significant differences in vaccination rates between participants who were coming from schools of health sciences versus those from financial schools (RR = 1.082, 95% CI = 0.7574-1.544; p = 0.3348) were observed. Conclusions Public health policy interventions and education on HPV vaccines are effective ways to improve the awareness and acceptance rate of HPV vaccination among female students and improve the HPV vaccination coverage level in Cyprus.}, } @article {pmid36236773, year = {2022}, author = {Shumba, AT and Montanaro, T and Sergi, I and Fachechi, L and De Vittorio, M and Patrono, L}, title = {Leveraging IoT-Aware Technologies and AI Techniques for Real-Time Critical Healthcare Applications.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {19}, pages = {}, pmid = {36236773}, issn = {1424-8220}, mesh = {Aged ; *Artificial Intelligence ; Biocompatible Materials ; *Blood Glucose ; Delivery of Health Care ; Humans ; Technology ; }, abstract = {Personalised healthcare has seen significant improvements due to the introduction of health monitoring technologies that allow wearable devices to unintrusively monitor physiological parameters such as heart health, blood pressure, sleep patterns, and blood glucose levels, among others. 
Additionally, utilising advanced sensing technologies based on flexible and innovative biocompatible materials in wearable devices allows high accuracy and precision measurement of biological signals. Furthermore, applying real-time Machine Learning algorithms to highly accurate physiological parameters allows precise identification of unusual patterns in the data to provide health event predictions and warnings for timely intervention. However, in the predominantly adopted architectures, health event predictions based on Machine Learning are typically obtained by leveraging Cloud infrastructures characterised by shortcomings such as delayed response times and privacy issues. Fortunately, recent works highlight that a new paradigm based on Edge Computing technologies and on-device Artificial Intelligence significantly improve the latency and privacy issues. Applying this new paradigm to personalised healthcare architectures can significantly improve their efficiency and efficacy. Therefore, this paper reviews existing IoT healthcare architectures that utilise wearable devices and subsequently presents a scalable and modular system architecture to leverage emerging technologies to solve identified shortcomings. The defined architecture includes ultrathin, skin-compatible, flexible, high precision piezoelectric sensors, low-cost communication technologies, on-device intelligence, Edge Intelligence, and Edge Computing technologies. To provide development guidelines and define a consistent reference architecture for improved scalable wearable IoT-based critical healthcare architectures, this manuscript outlines the essential functional and non-functional requirements based on deductions from existing architectures and emerging technology trends. The presented system architecture can be applied to many scenarios, including ambient assisted living, where continuous surveillance and issuance of timely warnings can afford independence to the elderly and chronically ill. We conclude that the distribution and modularity of architecture layers, local AI-based elaboration, and data packaging consistency are the more essential functional requirements for critical healthcare application use cases. We also identify fast response time, utility, comfort, and low cost as the essential non-functional requirements for the defined system architecture.}, } @article {pmid36236664, year = {2022}, author = {Shahzad, K and Zia, T and Qazi, EU}, title = {A Review of Functional Encryption in IoT Applications.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {19}, pages = {}, pmid = {36236664}, issn = {1424-8220}, support = {SRC-PR2-01//Security Research Center at Naif Arab University for Security Sciences/ ; }, abstract = {The Internet of Things (IoT) represents a growing aspect of how entities, including humans and organizations, are likely to connect with others in their public and private interactions. The exponential rise in the number of IoT devices, resulting from ever-growing IoT applications, also gives rise to new opportunities for exploiting potential security vulnerabilities. In contrast to conventional cryptosystems, frameworks that incorporate fine-grained access control offer better opportunities for protecting valuable assets, especially when the connectivity level is dense. Functional encryption is an exciting new paradigm of public-key encryption that supports fine-grained access control, generalizing a range of existing fine-grained access control mechanisms. 
This survey reviews the recent applications of functional encryption and the major cryptographic primitives that it covers, identifying areas where the adoption of these primitives has had the greatest impact. We first provide an overview of different application areas where these access control schemes have been applied. Then, an in-depth survey of how the schemes are used in a multitude of applications related to IoT is given, rendering a potential vision of security and integrity that this growing field promises. Towards the end, we identify some research trends and state the open challenges that current developments face for a secure IoT realization.}, } @article {pmid36236587, year = {2022}, author = {Qin, M and Liu, T and Hou, B and Gao, Y and Yao, Y and Sun, H}, title = {A Low-Latency RDP-CORDIC Algorithm for Real-Time Signal Processing of Edge Computing Devices in Smart Grid Cyber-Physical Systems.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {19}, pages = {}, pmid = {36236587}, issn = {1424-8220}, support = {2019YJ0309//Sichuan Provincial Science and Technology Department/ ; }, abstract = {Smart grids are being expanded in scale with the increasing complexity of the equipment. Edge computing is gradually replacing conventional cloud computing due to its low latency, low power consumption, and high reliability. The CORDIC algorithm has the characteristics of high-speed real-time processing and is very suitable for hardware accelerators in edge computing devices. The iterative calculation method of the CORDIC algorithm yet leads to problems such as complex structure and high consumption of hardware resource. In this paper, we propose an RDP-CORDIC algorithm which pre-computes all micro-rotation directions and transforms the conventional single-stage iterative structure into a three-stage and multi-stage combined iterative structure, thereby enabling it to solve the problems of the conventional CORDIC algorithm with many iterations and high consumption. An accuracy compensation algorithm for the direction prediction constant is also proposed to solve the problem of high ROM consumption in the high precision implementation of the RDP-CORDIC algorithm. The experimental results showed that the RDP-CORDIC algorithm had faster computation speed and lower resource consumption with higher guaranteed accuracy than other CORDIC algorithms. Therefore, the RDP-CORDIC algorithm proposed in this paper may effectively increase computation performance while reducing the power and resource consumption of edge computing devices in smart grid systems.}, } @article {pmid36236546, year = {2022}, author = {Busaeed, S and Katib, I and Albeshri, A and Corchado, JM and Yigitcanlar, T and Mehmood, R}, title = {LidSonic V2.0: A LiDAR and Deep-Learning-Based Green Assistive Edge Device to Enhance Mobility for the Visually Impaired.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {19}, pages = {}, pmid = {36236546}, issn = {1424-8220}, support = {RG-11-611-38//King Abdulaziz University/ ; }, mesh = {*Deep Learning ; *Persons with Disabilities ; Humans ; *Self-Help Devices ; *Persons with Visual Disabilities ; *Wheelchairs ; }, abstract = {Over a billion people around the world are disabled, among whom 253 million are visually impaired or blind, and this number is greatly increasing due to ageing, chronic diseases, and poor environments and health. 
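The RDP-CORDIC entry above restructures the conventional CORDIC iteration, in which each micro-rotation's direction depends on the residual angle left by the previous step. A minimal sketch of that conventional rotation-mode iteration (floating-point here for clarity; hardware versions use fixed-point shift-and-add):

    import math

    def cordic_sin_cos(theta, n_iter=24):
        # Micro-rotation angles atan(2^-i) and the accumulated gain correction.
        angles = [math.atan(2.0 ** -i) for i in range(n_iter)]
        gain = 1.0
        for i in range(n_iter):
            gain /= math.sqrt(1.0 + 2.0 ** (-2 * i))
        x, y, z = 1.0, 0.0, theta  # start from the unit vector (1, 0)
        for i in range(n_iter):
            d = 1.0 if z >= 0 else -1.0  # direction decided per iteration
            x, y = x - d * y * 2.0 ** -i, y + d * x * 2.0 ** -i
            z -= d * angles[i]
        return y * gain, x * gain  # (sin(theta), cos(theta))

    print(cordic_sin_cos(math.pi / 6))  # approx. (0.5, 0.8660)

The serial dependence on z is exactly what the entry's direction-prediction step removes: precomputing all rotation directions allows the single-stage iteration to be restructured into combined multi-stage blocks.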
Despite many proposals, the current devices and systems lack maturity and do not completely fulfill user requirements and satisfaction. Increased research activity in this field is required in order to encourage the development, commercialization, and widespread acceptance of low-cost and affordable assistive technologies for visual impairment and other disabilities. This paper proposes a novel approach using a LiDAR with a servo motor and an ultrasonic sensor to collect data and predict objects using deep learning for environment perception and navigation. We adopted this approach using a pair of smart glasses, called LidSonic V2.0, to enable the identification of obstacles for the visually impaired. The LidSonic system consists of an Arduino Uno edge computing device integrated into the smart glasses and a smartphone app that transmits data via Bluetooth. Arduino gathers data, operates the sensors on the smart glasses, detects obstacles using simple data processing, and provides buzzer feedback to visually impaired users. The smartphone application collects data from Arduino, detects and classifies items in the spatial environment, and gives spoken feedback to the user on the detected objects. In comparison to image-processing-based glasses, LidSonic uses far less processing time and energy to classify obstacles using simple LiDAR data, according to several integer measurements. We comprehensively describe the proposed system's hardware and software design, having constructed their prototype implementations and tested them in real-world environments. Using the open platforms, WEKA and TensorFlow, the entire LidSonic system is built with affordable off-the-shelf sensors and a microcontroller board costing less than USD 80. Essentially, we provide designs of an inexpensive, miniature green device that can be built into, or mounted on, any pair of glasses or even a wheelchair to help the visually impaired. Our approach enables faster inference and decision-making using relatively low energy with smaller data sizes, as well as faster communications for edge, fog, and cloud computing.}, } @article {pmid36236536, year = {2022}, author = {Lei, L and Kou, L and Zhan, X and Zhang, J and Ren, Y}, title = {An Anomaly Detection Algorithm Based on Ensemble Learning for 5G Environment.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {19}, pages = {}, pmid = {36236536}, issn = {1424-8220}, mesh = {*Algorithms ; Cloud Computing ; Learning ; Machine Learning ; *Software ; }, abstract = {With the advent of the digital information age, new data services such as virtual reality, industrial Internet, and cloud computing have proliferated in recent years. As a result, it increases operator demand for 5G bearer networks by providing features such as high transmission capacity, ultra-long transmission distance, network slicing, and intelligent management and control. Software-defined networking, as a new network architecture, intends to increase network flexibility and agility and can better satisfy the demands of 5G networks for network slicing. Nevertheless, software-defined networking still faces the challenge of network intrusion. We propose an abnormal traffic detection method based on the stacking method and self-attention mechanism, which makes up for the shortcoming of the inability to track long-term dependencies between data samples in ensemble learning. 
Our method utilizes a self-attention mechanism and a convolutional network to automatically learn long-term associations between traffic samples and provide them to downstream tasks in sample embedding. In addition, we design a novel stacking ensemble method, which computes the sample embedding and the predicted values of the heterogeneous base learner through the fusion module to obtain the final outlier results. This paper conducts experiments on abnormal traffic datasets in the software-defined network environment, calculates precision, recall and F1-score, and compares and analyzes them with other algorithms. The experimental results show that the method designed in this paper achieves 0.9972, 0.9996, and 0.9984 in multiple indicators of precision, recall, and F1-score, respectively, which are better than the comparison methods.}, } @article {pmid36236523, year = {2022}, author = {Yi, F and Zhang, L and Xu, L and Yang, S and Lu, Y and Zhao, D}, title = {WSNEAP: An Efficient Authentication Protocol for IIoT-Oriented Wireless Sensor Networks.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {19}, pages = {}, pmid = {36236523}, issn = {1424-8220}, mesh = {Computer Communication Networks ; *Computer Security ; *Internet of Things ; }, abstract = {With the development of the Industrial Internet of Things (IIoT), industrial wireless sensors need to upload the collected private data to the cloud servers, resulting in a large amount of private data being exposed on the Internet. Private data are vulnerable to hacking. Many complex wireless-sensor-authentication protocols have been proposed. In this paper, we proposed an efficient authentication protocol for IIoT-oriented wireless sensor networks. The protocol introduces the PUF chip, and uses the Bloom filter to save and query the challenge-response pairs generated by the PUF chip. It ensures the security of the physical layer of the device and reduces the computing cost and communication cost of the wireless sensor side. The protocol introduces a pre-authentication mechanism to achieve continuous authentication between the gateway and the cloud server. The overall computational cost of the protocol is reduced. Formal security analysis and informal security analysis proved that our proposed protocol has more security features. We implemented various security primitives using the MIRACL cryptographic library and GMP large number library. Our proposed protocol was compared in-depth with related work. Detailed experiments show that our proposed protocol significantly reduces the computational cost and communication cost on the wireless sensor side and the overall computational cost of the protocol.}, } @article {pmid36236264, year = {2022}, author = {Thirumalaisamy, M and Basheer, S and Selvarajan, S and Althubiti, SA and Alenezi, F and Srivastava, G and Lin, JC}, title = {Interaction of Secure Cloud Network and Crowd Computing for Smart City Data Obfuscation.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {19}, pages = {}, pmid = {36236264}, issn = {1424-8220}, mesh = {*Algorithms ; *Cloud Computing ; Software ; }, abstract = {There can be many inherent issues in the process of managing cloud infrastructure and the platform of the cloud. The platform of the cloud manages cloud software and legality issues in making contracts. The platform also handles the process of managing cloud software services and legal contract-based segmentation. 
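The WSNEAP entry above stores and queries PUF challenge-response pairs with a Bloom filter, trading a small false-positive rate for constant-time membership tests on the sensor side. A minimal sketch of such a filter (the sizes, hash construction, and challenge-response encoding are illustrative assumptions):

    import hashlib

    class BloomFilter:
        def __init__(self, m_bits=8192, k_hashes=4):
            self.m, self.k = m_bits, k_hashes
            self.bits = bytearray(m_bits // 8)

        def _positions(self, item: bytes):
            # Derive k bit positions from salted SHA-256 digests.
            for i in range(self.k):
                h = hashlib.sha256(i.to_bytes(2, "big") + item).digest()
                yield int.from_bytes(h[:8], "big") % self.m

        def add(self, item: bytes):
            for p in self._positions(item):
                self.bits[p // 8] |= 1 << (p % 8)

        def __contains__(self, item: bytes):
            return all(self.bits[p // 8] & (1 << (p % 8))
                       for p in self._positions(item))

    bf = BloomFilter()
    bf.add(b"challenge-42||response-af31")  # hypothetical CRP encoding
    print(b"challenge-42||response-af31" in bf)  # True: no false negatives

A membership hit may rarely be a false positive, but a miss is definitive, which is why Bloom filters suit quick on-device screening of candidate challenge-response pairs.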
In this paper, we tackle these issues directly with feasible solutions. For these constraints, the Averaged One-Dependence Estimators (AODE) classifier and the SELECT Applicable Only to Parallel Server (SELECT-APSL) method, together termed ASA, are proposed to separate location-related data. The AODE classifier separates smart city data based on a hybrid data obfuscation technique, in which 50% of the raw data is managed and 50% of the hospital data is masked using the proposed transmission. The analysis of energy consumption before applying the cryptosystem shows about 71.66% of total packets delivered compared with existing algorithms; the analysis after applying the cryptosystem shows 47.34% consumption compared with existing state-of-the-art algorithms. The average energy consumption before data obfuscation decreased by 2.47%, and the average energy consumption after data obfuscation was reduced by 9.90%. The makespan time before data obfuscation decreased by 33.71%, and, compared with existing state-of-the-art algorithms, the makespan time after data obfuscation decreased by 1.3%. These results show the strength of our methodology.}, } @article {pmid36227021, year = {2023}, author = {Yang, DM and Chang, TJ and Hung, KF and Wang, ML and Cheng, YF and Chiang, SH and Chen, MF and Liao, YT and Lai, WQ and Liang, KH}, title = {Smart healthcare: A prospective future medical approach for COVID-19.}, journal = {Journal of the Chinese Medical Association : JCMA}, volume = {86}, number = {2}, pages = {138-146}, pmid = {36227021}, issn = {1728-7731}, mesh = {Humans ; *COVID-19 ; Artificial Intelligence ; Post-Acute COVID-19 Syndrome ; Pandemics/prevention & control ; Delivery of Health Care ; }, abstract = {COVID-19 has greatly affected human life for over 3 years. In this review, we focus on smart healthcare solutions that address major requirements for coping with the COVID-19 pandemic, including (1) the continuous monitoring of severe acute respiratory syndrome coronavirus 2, (2) patient stratification with distinct short-term outcomes (eg, mild or severe diseases) and long-term outcomes (eg, long COVID), and (3) adherence to medication and treatments for patients with COVID-19. Smart healthcare often utilizes medical artificial intelligence (AI) and cloud computing and integrates cutting-edge biological and optoelectronic techniques. These are valuable technologies for addressing the unmet needs in the management of COVID. By leveraging deep learning/machine learning capabilities and big data, medical AI can perform precise prognosis predictions and provide reliable suggestions for physicians' decision-making. Through the assistance of the Internet of Medical Things, which encompasses wearable devices, smartphone apps, internet-based drug delivery systems, and telemedicine technologies, the status of mild cases can be continuously monitored and medications provided at home without the need for hospital care. In cases that progress to severe disease, emergency feedback can be provided through the hospital for rapid treatment.
Smart healthcare can possibly prevent the development of severe COVID-19 cases and therefore lower the burden on intensive care units.}, } @article {pmid36225544, year = {2022}, author = {Li, H}, title = {Cloud Computing Image Processing Application in Athlete Training High-Resolution Image Detection.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {7423411}, pmid = {36225544}, issn = {1687-5273}, mesh = {Algorithms ; *Artificial Intelligence ; Athletes ; *Cloud Computing ; Humans ; Image Processing, Computer-Assisted/methods ; }, abstract = {The rapid development of Internet of Things mobile application technology and artificial intelligence has given rise to many services that meet the needs of modern life, such as augmented reality, face recognition, and language recognition and translation, which are applied in many fields alongside other information communication and processing services and are used on mobile phone, computer, and tablet clients. Terminal equipment is subject to the ultralow-latency and low-energy-consumption requirements of these applications. Therefore, the gap between resource-demanding application services and resource-limited mobile devices poses great problems for the current and future development of IoT mobile applications. Based on the local image features of depth images, this paper designs an image detection method for athletes' motion posture. First, according to the local image characteristics, the athlete's depth image obtained through Kinect is converted into skeleton point data. Next, a three-stage exploration algorithm is used to perform block-matching calculations on the athlete's skeleton point image to predict the athlete's movement posture. At the same time, movement behavior is recognized using the Euclidean distances between skeleton points. According to the experimental results, the image detection method designed in this paper can effectively avoid interference from external environmental factors, such as sunlight, and shows excellent accuracy and robustness in predicting athletes' movement postures and recognizing actions. This method can simplify a series of calibration tasks in the initial stage of 3D video surveillance and infer and recognize the posture of an observation target in real time. It has good application value and provides a reference for similar work.}, } @article {pmid36210997, year = {2022}, author = {B, D and M, L and R, A and Kallimani, JS and Walia, R and Belete, B}, title = {A Novel Feature Selection with Hybrid Deep Learning Based Heart Disease Detection and Classification in the e-Healthcare Environment.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {1167494}, pmid = {36210997}, issn = {1687-5273}, mesh = {Cloud Computing ; *Deep Learning ; *Heart Diseases/diagnosis ; Humans ; Neural Networks, Computer ; *Telemedicine ; }, abstract = {With the advancements in data mining, wearables, and cloud computing, online disease diagnosis services have been widely employed in the e-healthcare environment and improved the quality of the services. The e-healthcare services help to reduce the death rate through earlier identification of disease.
Simultaneously, heart disease (HD) is a deadly disorder, and patient survival depends on early diagnosis of HD. Early HD diagnosis and categorization play a key role in the analysis of clinical data. In the context of e-healthcare, we provide a novel feature selection with hybrid deep learning-based heart disease detection and classification (FSHDL-HDDC) model. The two primary preprocessing steps of the FSHDL-HDDC approach are data normalisation and the replacement of missing values. The FSHDL-HDDC method also necessitates the development of a feature selection method based on the elite opposition-based squirrel search algorithm (EO-SSA) in order to determine the optimal subset of features. Moreover, an attention-based convolutional neural network (ACNN) with long short-term memory (LSTM), called the ACNN-LSTM model, is utilized for the detection of HD by using medical data. An extensive experimental study is performed to ensure the improved classification performance of the FSHDL-HDDC technique. A detailed comparative study showed the superiority of the FSHDL-HDDC method over existing techniques in terms of different performance measures. The suggested FSHDL-HDDC system reached a maximum accuracy of 0.9772.}, } @article {pmid36210990, year = {2022}, author = {Chen, X and Huang, X}, title = {Application of Price Competition Model Based on Computational Neural Network in Risk Prediction of Transnational Investment.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {8906385}, pmid = {36210990}, issn = {1687-5273}, mesh = {Algorithms ; Commerce ; Industry ; *Investments ; *Neural Networks, Computer ; }, abstract = {Aiming at the scenario where edge devices rely on cloud servers for collaborative computing, this paper proposes an efficient edge-cloud collaborative reasoning method. In order to meet the application's specific requirements for delay or accuracy, an optimal division point selection algorithm is proposed. A multichannel supply chain price game model is constructed, and nonlinear dynamics theory is introduced into the research of the multichannel supply chain market. According to the actual competition situation, the different business strategies of retailers are considered in the modeling, which makes the model closer to the actual competition situation. Taking the retailer's profit as an indicator, the influence of the chaos phenomenon on the market performance is analyzed. Compared with previous studies, this work uses nonlinear theory to better reveal the operating laws of the economic system. This paper considers the case of company A in the financial industry acquiring company B in Sweden. It is concluded that company B is currently facing financial difficulties, but its brand and technical advantages are far superior to company A's. The indirect financial risk index of company B, that is, the investment environment, is analyzed, and the final investment environment score of the country where company B is located is 90 points, an excellent grade for the target enterprise's investment environment.
Combining the investment environment score and the alarm prediction score, it is concluded that company A's post-merger financial risk warning level is at a serious alarm level.}, } @article {pmid36207705, year = {2022}, author = {Zhao, Y and Rokhani, FZ and Sazlina, SG and Devaraj, NK and Su, J and Chew, BH}, title = {Defining the concepts of a smart nursing home and its potential technology utilities that integrate medical services and are acceptable to stakeholders: a scoping review.}, journal = {BMC geriatrics}, volume = {22}, number = {1}, pages = {787}, pmid = {36207705}, issn = {1471-2318}, mesh = {Aged ; Humans ; *Nursing Homes ; *Quality of Life ; Skilled Nursing Facilities ; Technology ; }, abstract = {BACKGROUND AND OBJECTIVES: Smart technology in nursing home settings has the potential to support operations that manage larger numbers of older residents. However, the concepts, definitions, and types of smart technology, integrated medical services, and stakeholders' acceptability of smart nursing homes are less clear. This scoping review aims to define a smart nursing home and examine the qualitative evidence on technological feasibility, integration of medical services, and acceptability of the stakeholders.

METHODS: Comprehensive searches were conducted on stakeholders' websites (Phase 1) and in 11 electronic databases (Phase 2) for existing concepts of the smart nursing home, for what technologies and medical services were implemented in nursing home settings and how, and for acceptability assessments by the stakeholders. The publication period was from January 1999 to September 2021, inclusive. The language was limited to English and Chinese. Included articles had to report on nursing home settings for older adults ≥ 60 years old, with or without medical demands, but not bed-bound. Technology Readiness Levels were used to measure the readiness of new technologies and system designs. The analysis was guided by the Framework Method and the smart technology adoption behaviours of elder consumers theoretical model. The results were reported according to the PRISMA-ScR.

RESULTS: A total of 177 publications (13 website documents and 164 journal articles) were selected. Smart nursing homes are technology-assisted nursing homes that support the life enjoyment of their residents. They used IoT, computing technologies, cloud computing, big data and AI, information management systems, and digital health to integrate medical services in monitoring abnormal events, assisting daily living, conducting teleconsultation, managing health information, and improving the interaction between providers and residents. Fifty-five percent of the new technologies were ready for use in nursing homes (levels 6-7), and the remainder had demonstrated technical feasibility (levels 1-5). Healthcare professionals with higher education, better tech-savviness, and fewer years at work, and older adults with more severe illnesses, were more accepting of smart technologies.

CONCLUSIONS: Smart nursing homes with integrated medical services have great potential to improve the quality of care and ensure older residents' quality of life.}, } @article {pmid36206751, year = {2022}, author = {Chen, L and Yu, L and Liu, Y and Xu, H and Ma, L and Tian, P and Zhu, J and Wang, F and Yi, K and Xiao, H and Zhou, F and Yang, Y and Cheng, Y and Bai, L and Wang, F and Zhu, Y}, title = {Space-time-regulated imaging analyzer for smart coagulation diagnosis.}, journal = {Cell reports. Medicine}, volume = {3}, number = {10}, pages = {100765}, pmid = {36206751}, issn = {2666-3791}, mesh = {*Artificial Intelligence ; Prospective Studies ; *Blood Coagulation ; Blood Coagulation Factors ; Fibrinogen/analysis ; }, abstract = {Intelligent blood coagulation diagnosis is awaited to meet the current need of large, time-sensitive clinical caseloads, owing to its efficient and automated diagnoses. Herein, a method is reported and validated that realizes this through artificial intelligence (AI)-assisted identification of optical clotting biophysics (OCB) properties. Image differential calculation is used for precise acquisition of OCB properties with elimination of initial differences, and the strategy of space-time regulation allows on-demand space-time OCB property identification and enables diverse blood function diagnoses. The integrated applications of smartphones and cloud computing offer a user-friendly automated analysis for accurate and convenient diagnoses. The prospective assays of clinical cases (n = 41) show that the system achieves 97.6%, 95.1%, and 100% accuracy for coagulation factors, fibrinogen function, and comprehensive blood coagulation diagnoses, respectively. This method should enable more low-cost and convenient diagnoses and provide a path toward finding potential diagnostic markers.}, } @article {pmid36206264, year = {2022}, author = {Fu, Z}, title = {Computer cyberspace security mechanism supported by cloud computing.}, journal = {PloS one}, volume = {17}, number = {10}, pages = {e0271546}, pmid = {36206264}, issn = {1932-6203}, mesh = {Algorithms ; *Cloud Computing ; Computer Security ; Computers ; Internet ; *NAD ; }, abstract = {To improve the cybersecurity of Cloud Computing (CC) systems, this paper proposes a Network Anomaly Detection (NAD) model based on the Fuzzy C-Means (FCM) clustering algorithm. Secondly, a Cybersecurity Assessment Model (CAM) based on Grey Relational Grade (GRG) is constructed. Finally, combined with the Rivest-Shamir-Adleman (RSA) algorithm, this work proposes a CC network-oriented data encryption technology, selects different datasets for the different models, and tests each model through designed experiments. The results show that the average Correct Detection Rate (CDR) of the NAD model for different types of abnormal data is 93.33%. The average False Positive Rate (FPR) and the average Unreported Rate (UR) are 6.65% and 16.27%, respectively. Thus, the NAD model can ensure a high detection accuracy in the case of sufficient data. Meanwhile, the cybersecurity situation prediction by the CAM is in good agreement with the actual situation. The error between the average value of cybersecurity situation prediction and the actual value is only 0.82%, and the prediction accuracy is high. The RSA algorithm keeps the average encryption time for very large texts at about 12 s. The decryption time is slightly longer but within a reasonable range. For texts of other sizes, the encryption time is maintained within 0.5 s.
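The FCM clustering at the core of the NAD model described in the entry above can be illustrated with a compact NumPy sketch of the standard Fuzzy C-Means updates; this is a generic textbook formulation over toy data, not the paper's implementation.

```python
# Minimal Fuzzy C-Means sketch (standard updates; illustrative only).
import numpy as np

def fuzzy_c_means(X, c=2, m=2.0, n_iter=100, seed=0):
    rng = np.random.default_rng(seed)
    u = rng.random((len(X), c))
    u /= u.sum(axis=1, keepdims=True)            # memberships: rows sum to 1
    for _ in range(n_iter):
        um = u ** m
        centers = um.T @ X / um.sum(axis=0)[:, None]          # weighted means
        d = np.linalg.norm(X[:, None, :] - centers[None, :, :], axis=2) + 1e-12
        u = 1.0 / (d ** (2 / (m - 1)))           # standard membership update
        u /= u.sum(axis=1, keepdims=True)
    return centers, u

# Toy usage: points far from every center are anomaly candidates.
rng = np.random.default_rng(1)
X = np.vstack([rng.normal(size=(200, 2)), rng.normal(size=(5, 2)) * 6 + 10])
centers, u = fuzzy_c_means(X, c=2)
scores = np.linalg.norm(X[:, None, :] - centers[None, :, :], axis=2).min(axis=1)
print("top-5 anomaly candidates:", np.argsort(scores)[-5:])
```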
This work aims to provide important technical support for anomaly detection, overall security situation analysis, and data transmission security protection of CC systems to improve their cybersecurity.}, } @article {pmid36204298, year = {2022}, author = {Zhang, C and Cheng, T and Li, D and Yu, X and Chen, F and He, Q}, title = {Low-host double MDA workflow for uncultured ASFV positive blood and serum sample sequencing.}, journal = {Frontiers in veterinary science}, volume = {9}, number = {}, pages = {936781}, pmid = {36204298}, issn = {2297-1769}, abstract = {African swine fever (ASF) is a highly lethal and contagious disease caused by African swine fever virus (ASFV). Whole-genome sequencing of ASFV is necessary to study its mutation and recombination and to trace its transmission. Uncultured samples have a considerable amount of background DNA, which causes waste of sequencing throughput, storage space, and computing resources. Sequencing methods attempted for uncultured samples have various drawbacks. In this study, we improved a C18 spacer MDA (Multiple Displacement Amplification)-combined host DNA exhaustion strategy to remove background DNA and suit NGS and TGS sequencing. Using this workflow, we successfully sequenced two uncultured ASFV-positive samples. The results show that this method can significantly reduce the percentage of background DNA. We also developed software that performs real-time base calling and analysis of ASFV TGS sequencing reads at set intervals on a cloud server.}, } @article {pmid36197869, year = {2023}, author = {Guo, MH and Liu, ZN and Mu, TJ and Hu, SM}, title = {Beyond Self-Attention: External Attention Using Two Linear Layers for Visual Tasks.}, journal = {IEEE transactions on pattern analysis and machine intelligence}, volume = {45}, number = {5}, pages = {5436-5447}, doi = {10.1109/TPAMI.2022.3211006}, pmid = {36197869}, issn = {1939-3539}, abstract = {Attention mechanisms, especially self-attention, have played an increasingly important role in deep feature representation for visual tasks. Self-attention updates the feature at each position by computing a weighted sum of features using pair-wise affinities across all positions to capture the long-range dependency within a single sample. However, self-attention has quadratic complexity and ignores potential correlation between different samples. This article proposes a novel attention mechanism which we call external attention, based on two external, small, learnable, shared memories, which can be implemented easily by simply using two cascaded linear layers and two normalization layers; it conveniently replaces self-attention in existing popular architectures. External attention has linear complexity and implicitly considers the correlations between all data samples. We further incorporate the multi-head mechanism into external attention to provide an all-MLP architecture, external attention MLP (EAMLP), for image classification.
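The two-linear-layer design described in this entry translates almost directly into code. The PyTorch sketch below follows the paper's description (external key/value memories with double normalization over the token and memory dimensions); the memory size s=64 and the tensor shapes are illustrative choices, not values taken from the authors' released code.

```python
# Sketch of external attention: two cascaded linear layers acting as
# shared external key/value memories, with double normalization.
import torch
import torch.nn as nn

class ExternalAttention(nn.Module):
    def __init__(self, d_model: int, s: int = 64):
        super().__init__()
        self.mk = nn.Linear(d_model, s, bias=False)   # external key memory
        self.mv = nn.Linear(s, d_model, bias=False)   # external value memory

    def forward(self, x):                   # x: (batch, n_tokens, d_model)
        attn = self.mk(x)                   # (batch, n, s) affinities
        attn = torch.softmax(attn, dim=1)   # normalize over tokens...
        attn = attn / (attn.sum(dim=2, keepdim=True) + 1e-9)  # ...then memory
        return self.mv(attn)                # (batch, n, d_model)

x = torch.randn(2, 196, 512)                # e.g., 14x14 patches, 512 channels
print(ExternalAttention(512)(x).shape)      # torch.Size([2, 196, 512])
```

Because the memories are shared across all inputs, complexity is linear in the number of tokens, which is the property the abstract emphasizes.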
Extensive experiments on image classification, object detection, semantic segmentation, instance segmentation, image generation, and point cloud analysis reveal that our method provides results comparable or superior to the self-attention mechanism and some of its variants, with much lower computational and memory costs.}, } @article {pmid36194325, year = {2023}, author = {Zhou, Y and Hu, Z and Geng, Q and Ma, J and Liu, J and Wang, M and Wang, Y}, title = {Monitoring and analysis of desertification surrounding Qinghai Lake (China) using remote sensing big data.}, journal = {Environmental science and pollution research international}, volume = {30}, number = {7}, pages = {17420-17436}, pmid = {36194325}, issn = {1614-7499}, mesh = {Humans ; *Remote Sensing Technology ; *Conservation of Natural Resources/methods ; Lakes ; Big Data ; Environmental Monitoring/methods ; China ; }, abstract = {Desertification is one of the most serious ecological environmental problems in the world. Monitoring the spatiotemporal dynamics of desertification is crucial for its control. The region around Qinghai Lake, in the northeastern part of the Qinghai-Tibet Plateau in China, is a special ecological function area and a climate change sensitive area, making its environmental conditions a great concern. Using cloud computing via Google Earth Engine (GEE), we collected Landsat 5 TM, Landsat 8 OLI/TIRS, and MODIS Albedo images from 2000 to 2020 in the region around Qinghai Lake, acquired land surface albedo (Albedo), and normalized vegetation index (NDVI) to build a remote sensing monitoring model of desertification. Our results showed that the desertification difference index based on the Albedo-NDVI feature space could reflect the degree of desertification in the region around Qinghai Lake. GEE offers significant advantages, such as massive data processing and long-term dynamic monitoring. The desertification land area fluctuated downward in the study area from 2000 to 2020, and the overall desertification status improved. Natural factors, such as climate change from warm-dry to warm-wet and decreased wind speed, and human factors improved the desertification situation. The findings indicate that desertification in the region around Qinghai Lake has been effectively controlled, and the overall desertification trend is improving.}, } @article {pmid36190152, year = {2022}, author = {Greene, D}, title = {Landlords of the internet: Big data and big real estate.}, journal = {Social studies of science}, volume = {52}, number = {6}, pages = {904-927}, doi = {10.1177/03063127221124943}, pmid = {36190152}, issn = {1460-3659}, mesh = {Humans ; *Big Data ; *Internet ; Commerce ; Technology ; }, abstract = {Who owns the internet? It depends where you look. The physical assets at the core of the internet, the warehouses that store the cloud's data and interlink global networks, are owned not by technology firms like Google and Facebook but by commercial real estate barons who compete with malls and property storage empires. Granted an empire by the US at the moment of the internet's commercialization, these internet landlords shaped how the network of networks that we call the internet physically connects, and how personal and business data is stored and transmitted. Under their governance, internet exchanges, colocation facilities, and data centers take on a double life as financialized real estate assets that circle the globe even as their servers and cables are firmly rooted in place. 
The history of internet landlords forces a fundamental reconsideration of the business model at the base of the internet. This history makes clear that the internet was never an exogenous shock to capitalist social relations, but rather a touchstone example of an economic system increasingly ruled by asset owners like landlords.}, } @article {pmid36188195, year = {2022}, author = {Zhou, Y and Varzaneh, MG}, title = {Efficient and scalable patients clustering based on medical big data in cloud platform.}, journal = {Journal of cloud computing (Heidelberg, Germany)}, volume = {11}, number = {1}, pages = {49}, pmid = {36188195}, issn = {2192-113X}, abstract = {With the worldwide outbreak of the COVID-19 pandemic, the volume of patients has increased rapidly all over the world, which brings major risks and challenges to the maintenance of public healthcare. In this situation, quick integration and analysis of the medical records of patients in a cloud platform are of great value for accurate recognition and scientific diagnosis of the health conditions of potential patients. However, due to the big volume of medical data of patients distributed across different platforms (e.g., multiple hospitals), how to integrate these data for patient clustering and analysis in a time-efficient and scalable manner in a cloud platform, while guaranteeing privacy preservation, is still a challenging task. Motivated by this fact, a time-efficient, scalable, and privacy-guaranteed patient clustering method in a cloud platform is proposed in this work. Finally, we demonstrate the competitive advantages of our method via a set of simulated experiments. Experimental comparisons with competitive methods from the current research literature prove the feasibility of our proposal.}, } @article {pmid36185458, year = {2022}, author = {Moser, N and Yu, LS and Rodriguez Manzano, J and Malpartida-Cardenas, K and Au, A and Arkell, P and Cicatiello, C and Moniri, A and Miglietta, L and Wang, WH and Wang, SF and Holmes, A and Chen, YH and Georgiou, P}, title = {Quantitative detection of dengue serotypes using a smartphone-connected handheld lab-on-chip platform.}, journal = {Frontiers in bioengineering and biotechnology}, volume = {10}, number = {}, pages = {892853}, pmid = {36185458}, issn = {2296-4185}, abstract = {Dengue is one of the most prevalent infectious diseases in the world. Rapid, accurate and scalable diagnostics are key to patient management and epidemiological surveillance of the dengue virus (DENV); however, current technologies do not match required clinical sensitivity and specificity or rely on large laboratory equipment. In this work, we report the translation of our smartphone-connected handheld Lab-on-Chip (LoC) platform for the quantitative detection of two dengue serotypes. At its core, the approach relies on the combination of Complementary Metal-Oxide-Semiconductor (CMOS) microchip technology to integrate an array of 78 × 56 potentiometric sensors, and a label-free reverse-transcriptase loop mediated isothermal amplification (RT-LAMP) assay. The platform communicates with a smartphone app, which synchronises results in real time with a secure cloud server hosted by Amazon Web Services (AWS) for epidemiological surveillance.
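A hedged sketch of what syncing an assay record to AWS-hosted storage might look like using boto3; the bucket name, object key, and record fields below are invented placeholders, since the platform's actual cloud API is not described in the abstract.

```python
# Illustrative upload of a result record to S3 with boto3 (placeholder
# bucket/key/fields; assumes AWS credentials are configured locally).
import json
import boto3

s3 = boto3.client("s3")
result = {"device": "loc-007", "assay": "RT-eLAMP", "serotype": "DENV-2",
          "time_to_positive_min": 12.5, "geo": [25.03, 121.56]}
s3.put_object(Bucket="example-denv-results",
              Key="results/loc-007.json",
              Body=json.dumps(result).encode("utf-8"))
```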
The assay on our LoC platform (RT-eLAMP) was shown to match the performance of a gold-standard fluorescence-based real-time instrument (RT-qLAMP) with synthetic DENV-1 and DENV-2 RNA and extracted RNA from 9 DENV-2 clinical isolates, achieving quantitative detection in under 15 min. To validate the portability of the platform and the geo-tagging capabilities, we conducted our study in laboratories at Imperial College London, UK, and Kaohsiung Medical Hospital, Taiwan. This approach carries high potential for application in low-resource settings at the point of care (PoC).}, } @article {pmid36179156, year = {2022}, author = {Sun, J and Endo, S and Lin, H and Hayden, P and Vedral, V and Yuan, X}, title = {Perturbative Quantum Simulation.}, journal = {Physical review letters}, volume = {129}, number = {12}, pages = {120505}, doi = {10.1103/PhysRevLett.129.120505}, pmid = {36179156}, issn = {1079-7114}, abstract = {Approximation based on perturbation theory is the foundation for most of the quantitative predictions of quantum mechanics, whether in quantum many-body physics, chemistry, quantum field theory, or other domains. Quantum computing provides an alternative to the perturbation paradigm, yet state-of-the-art quantum processors with tens of noisy qubits are of limited practical utility. Here, we introduce perturbative quantum simulation, which combines the complementary strengths of the two approaches, enabling the solution of large practical quantum problems using limited noisy intermediate-scale quantum hardware. The use of a quantum processor eliminates the need to identify a solvable unperturbed Hamiltonian, while the introduction of perturbative coupling permits the quantum processor to simulate systems larger than the available number of physical qubits. We present an explicit perturbative expansion that mimics the Dyson series expansion and involves only local unitary operations, and show its optimality over other expansions under certain conditions. We numerically benchmark the method for interacting bosons, fermions, and quantum spins in different topologies, and study different physical phenomena, such as information propagation, charge-spin separation, and magnetism, on systems of up to 48 qubits using only 8+1-qubit quantum hardware. We demonstrate our scheme on the IBM quantum cloud, verifying its noise robustness and illustrating its potential for benchmarking large quantum processors with smaller ones.}, } @article {pmid36174081, year = {2022}, author = {Mul, E and Ancin Murguzur, FJ and Hausner, VH}, title = {Impact of the COVID-19 pandemic on human-nature relations in a remote nature-based tourism destination.}, journal = {PloS one}, volume = {17}, number = {9}, pages = {e0273354}, pmid = {36174081}, issn = {1932-6203}, mesh = {*COVID-19/epidemiology ; Human Characteristics ; Humans ; Pandemics ; *Tourism ; Travel ; }, abstract = {Tourism and nature-based recreation have changed dramatically during the COVID-19 pandemic. Travel restrictions caused sharp declines in visitation numbers, particularly in remote areas, such as northern Norway. In addition, the pandemic may have altered human-nature relationships by changing visitor behaviour and preferences. We studied visitor numbers and behaviour in northern Norway, based on user-generated data, in the form of photographic material that was uploaded to the popular online platform Flickr. A total of 195,200 photographs, taken by 5,247 photographers, were subjected to Google's "Cloud Vision" automatic content analysis algorithm.
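Label extraction of the kind described in this entry can be reproduced with the google-cloud-vision client library; in the sketch below, the image path is a placeholder and valid GCP credentials (GOOGLE_APPLICATION_CREDENTIALS) are assumed.

```python
# Hedged sketch: obtaining image labels with the Google Cloud Vision client,
# analogous to the labelling step described above ("photo.jpg" is a placeholder).
from google.cloud import vision

client = vision.ImageAnnotatorClient()
with open("photo.jpg", "rb") as f:
    image = vision.Image(content=f.read())

response = client.label_detection(image=image)
for label in response.label_annotations:
    # Each annotation carries a description (e.g., "Mountain") and a score.
    print(label.description, round(label.score, 3))
```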
The resulting collection of labels that were assigned to each photograph was analysed in structural topic models, using photography date (relative to the start of the pandemic measures in Norway) and reported or estimated photographers' nationality as explanatory variables. Our results show that nature-based recreation relating to "mountains" and "winter" became more prevalent during the pandemic, amongst both domestic and international photographers. Shifts in preferences due to the pandemic outbreak strongly depended on nationality, with domestic visitors demonstrating a wide interest in topics while international visitors maintained their preference for nature-based experiences. As indicated by the topic models, among the activities that suffered most from the decline in international tourism were northern lights viewing and cruises. In contrast, images depicting mountains and flora and fauna increased in prevalence during the pandemic. Domestic visitors, meanwhile, spent more time in urban settings as a result of restrictions, which resulted in a higher prevalence of non-nature-related images. Our results underscore the need to consider the dynamic nature of human-nature relationships. The contrast in flexibility to adapt to changing conditions and travel restrictions should be incorporated in collaborative efforts of municipalities and tour operators to develop sustainable local nature-based tourism products, particularly in remote areas.}, } @article {pmid36172315, year = {2022}, author = {Jiang, Y and Lei, Y}, title = {Implementation of Trusted Traceability Query Using Blockchain and Deep Reinforcement Learning in Resource Management.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {6559517}, pmid = {36172315}, issn = {1687-5273}, mesh = {Algorithms ; *Blockchain ; Cloud Computing ; Technology ; }, abstract = {To better track the source of goods and maintain the quality of goods, the present work uses blockchain technology to establish a system for trusted traceability queries and information management. First, the shortcomings of current agricultural product traceability systems are analyzed, the application of blockchain technology to traceability systems is studied, and a new agricultural product traceability system model is established based on blockchain technology. Then, the task scheduling problem of resource clusters in cloud computing resource management is studied. The present work expands the task model and uses the deep Q network algorithm in deep reinforcement learning to solve various optimization objectives preset in the task scheduling problem. Next, a resource management algorithm based on a deep Q network is proposed. Finally, the performance of the algorithm is analyzed from the aspects of parameters, structure, and task load. Experiments show that the algorithm outperforms Shortest Job First (SJF), Tetris[∗], Packer, and other classic task scheduling algorithms on different optimization objectives. In the traceability system test, the traceability accuracy is 99% for the constructed system in the first group of samples. In the second group, the traceability accuracy reaches 98% for the constructed system. In general, the traceability accuracy of the system proposed here is above 98% across 8 groups of experimental samples, and the traceability accuracy is similar for each experimental group.
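The deep Q-network component mentioned in this entry can be outlined generically: the PyTorch sketch below performs one Bellman backup for a toy scheduling problem, with state/action sizes, rewards, and network shape invented purely for illustration (not the paper's task model).

```python
# Generic single DQN update step (illustrative toy scheduling setup).
import torch
import torch.nn as nn

n_state, n_action = 8, 4    # e.g., cluster-load features / candidate queue slots
q_net = nn.Sequential(nn.Linear(n_state, 64), nn.ReLU(), nn.Linear(64, n_action))
target_net = nn.Sequential(nn.Linear(n_state, 64), nn.ReLU(), nn.Linear(64, n_action))
target_net.load_state_dict(q_net.state_dict())   # periodically synced copy
opt = torch.optim.Adam(q_net.parameters(), lr=1e-3)
gamma = 0.99

def dqn_step(s, a, r, s_next, done):
    """One Bellman backup on a batch of (s, a, r, s', done) transitions."""
    q = q_net(s).gather(1, a.unsqueeze(1)).squeeze(1)     # Q(s, a)
    with torch.no_grad():
        target = r + gamma * target_net(s_next).max(dim=1).values * (1 - done)
    loss = nn.functional.smooth_l1_loss(q, target)
    opt.zero_grad(); loss.backward(); opt.step()
    return loss.item()

# Toy batch of 32 random transitions.
s, s2 = torch.randn(32, n_state), torch.randn(32, n_state)
a = torch.randint(0, n_action, (32,))
r, d = torch.randn(32), torch.zeros(32)
print("TD loss:", dqn_step(s, a, r, s2, d))
```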
The resource management approach of the traceability system constructed here provides some ideas for the application of reinforcement learning technology in the construction of traceability systems.}, } @article {pmid36171329, year = {2022}, author = {Wolf, K and Dawson, RJ and Mills, JP and Blythe, P and Morley, J}, title = {Towards a digital twin for supporting multi-agency incident management in a smart city.}, journal = {Scientific reports}, volume = {12}, number = {1}, pages = {16221}, pmid = {36171329}, issn = {2045-2322}, mesh = {*Ambulances ; Cities ; Cloud Computing ; *Emergency Medical Services ; Floods ; }, abstract = {Cost-effective on-demand computing resources can help to process the increasing number of large, diverse datasets generated from smart internet-enabled technology, such as sensors, CCTV cameras, and mobile devices, with high temporal resolution. Category 1 emergency services (Ambulance, Fire and Rescue, and Police) can benefit from access to (near) real-time traffic- and weather data to coordinate multiple services, such as reassessing a route on the transport network affected by flooding or road incidents. However, there is a tendency not to utilise available smart city data sources, due to the heterogeneous data landscape, lack of real-time information, and communication inefficiencies. Using a systems engineering approach, we identify the current challenges faced by stakeholders involved in incident response and formulate future requirements for an improved system. Based on these initial findings, we develop a use case using Microsoft Azure cloud computing technology for analytical functionalities that can better support stakeholders in their response to an incident. Our prototype allows stakeholders to view available resources, send automatic updates and integrate location-based real-time weather and traffic data. We anticipate our study will provide a foundation for the future design of a data ontology for multi-agency incident response in smart cities of the future.}, } @article {pmid36164525, year = {2022}, author = {Roy, B and Bari, E}, title = {Examining the relationship between land surface temperature and landscape features using spectral indices with Google Earth Engine.}, journal = {Heliyon}, volume = {8}, number = {9}, pages = {e10668}, pmid = {36164525}, issn = {2405-8440}, abstract = {Land surface temperature (LST) is strongly influenced by landscape features as they change the thermal characteristics of the surface greatly. Normalized Difference Vegetation Index (NDVI), Normalized Difference Water Index (NDWI), Normalized Difference Built-up Index (NDBI), and Normalized Difference Bareness Index (NDBAI) correspond to vegetation cover, water bodies, impervious build-ups, and bare lands, respectively. These indices were utilized to demonstrate the relationship between multiple landscape features and LST using the spectral indices derived from images of Landsat 5 Thematic Mapper (TM), and Landsat 8 Operational Land Imager (OLI) of Sylhet Sadar Upazila (2000-2018). Google Earth Engine (GEE) cloud computing platform was used to filter, process, and analyze trends with logistic regression. LST and other spectral indices were calculated. Changes in LST (2000-2018) range from -6 °C to +4 °C in the study area. Because of higher vegetation cover and reserve forest, the north-eastern part of the study region had the greatest variations in LST. 
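Index computation on GEE of the kind used in these remote-sensing entries can be sketched with the Earth Engine Python API; the region geometry, date range, and cloud-cover threshold below are illustrative placeholders, and an authenticated GEE account is assumed.

```python
# Hedged GEE sketch: median NDVI from Landsat 8 surface reflectance.
# Run ee.Authenticate() once beforehand; the bounds below are placeholders.
import ee

ee.Initialize()
region = ee.Geometry.Rectangle([91.5, 24.8, 92.0, 25.1])
collection = (ee.ImageCollection("LANDSAT/LC08/C02/T1_L2")
              .filterBounds(region)
              .filterDate("2018-01-01", "2018-12-31")
              .filter(ee.Filter.lt("CLOUD_COVER", 10)))

def add_ndvi(img):
    # Landsat 8 surface reflectance: SR_B5 = NIR, SR_B4 = red.
    return img.addBands(img.normalizedDifference(["SR_B5", "SR_B4"]).rename("NDVI"))

ndvi_median = collection.map(add_ndvi).select("NDVI").median()
print(ndvi_median.reduceRegion(ee.Reducer.mean(), region, scale=30).getInfo())
```

NDWI, NDBI, and similar band-ratio indices follow the same normalizedDifference pattern with different band pairs.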
The spectral indices corresponding to landscape features have a considerable explanatory capacity for describing LST scenarios. The correlation of these indices with LST ranges from -0.52 (NDBI) to +0.57 (NDVI).}, } @article {pmid36161827, year = {2022}, author = {Huemer, J and Kronschläger, M and Ruiss, M and Sim, D and Keane, PA and Findl, O and Wagner, SK}, title = {Diagnostic accuracy of code-free deep learning for detection and evaluation of posterior capsule opacification.}, journal = {BMJ open ophthalmology}, volume = {7}, number = {1}, pages = {}, pmid = {36161827}, issn = {2397-3269}, support = {MR/T000953/1/MRC_/Medical Research Council/United Kingdom ; MR/T019050/1/MRC_/Medical Research Council/United Kingdom ; }, mesh = {Area Under Curve ; *Capsule Opacification/diagnosis ; *Deep Learning ; Humans ; Retrospective Studies ; Vision Disorders ; }, abstract = {OBJECTIVE: To train and validate a code-free deep learning system (CFDLS) on classifying high-resolution digital retroillumination images of posterior capsule opacification (PCO) and to discriminate between clinically significant and non-significant PCOs.

METHODS AND ANALYSIS: For this retrospective registry study, three expert observers graded two independent datasets totalling 279 images three separate times, from no PCO to severe PCO, and provided binary labels for clinical significance. The CFDLS was trained and internally validated using the 179 images of the training dataset and externally validated with the remaining 100 images. Model development was performed through Google Cloud AutoML Vision. Intraobserver and interobserver variabilities were assessed using Fleiss kappa (κ) coefficients, and model performance through sensitivity, specificity, and area under the curve (AUC).

RESULTS: Intraobserver variability κ values for observers 1, 2 and 3 were 0.90 (95% CI 0.86 to 0.95), 0.94 (95% CI 0.90 to 0.97) and 0.88 (95% CI 0.82 to 0.93). Interobserver agreement was high, ranging from 0.85 (95% CI 0.79 to 0.90) between observers 1 and 2 to 0.90 (95% CI 0.85 to 0.94) for observers 1 and 3. On internal validation, the AUC of the CFDLS was 0.99 (95% CI 0.92 to 1.0); sensitivity was 0.89 at a specificity of 1. On external validation, the AUC was 0.97 (95% CI 0.93 to 0.99); sensitivity was 0.84 and specificity was 0.92.

CONCLUSION: This CFDLS provides highly accurate discrimination between clinically significant and non-significant PCO, equivalent to human expert graders. The clinical value as a potential decision support tool in different models of care warrants further research.}, } @article {pmid36160943, year = {2022}, author = {Sulis, E and Amantea, IA and Aldinucci, M and Boella, G and Marinello, R and Grosso, M and Platter, P and Ambrosini, S}, title = {An ambient assisted living architecture for hospital at home coupled with a process-oriented perspective.}, journal = {Journal of ambient intelligence and humanized computing}, volume = {}, number = {}, pages = {1-19}, pmid = {36160943}, issn = {1868-5137}, abstract = {The growing number of next-generation applications offers a relevant opportunity for healthcare services, generating an urgent need for architectures for systems integration. Moreover, the huge amount of stored information related to events can be explored by adopting a process-oriented perspective. This paper discusses an Ambient Assisted Living healthcare architecture to manage hospital home-care services. The proposed solution relies on adopting an event manager to integrate sources ranging from personal devices to web-based applications. Data are processed on a federated cloud platform offering computing infrastructure and storage resources to improve scientific research. In a second step, a business process analysis of telehealth and telemedicine applications is considered. An initial study explored the business process flow to capture the main sequences of tasks, activities, and events. This step paves the way for integrating process mining techniques into compliance monitoring in an AAL architecture framework.}, } @article {pmid36157083, year = {2023}, author = {Ahmad, I and Abdullah, S and Ahmed, A}, title = {IoT-fog-based healthcare 4.0 system using blockchain technology.}, journal = {The Journal of supercomputing}, volume = {79}, number = {4}, pages = {3999-4020}, pmid = {36157083}, issn = {0920-8542}, abstract = {Real-time tracking and surveillance of patients' health has become ubiquitous in the healthcare sector as a result of the development of fog, cloud computing, and Internet of Things (IoT) technologies. Medical IoT (MIoT) equipment often transfers health data to a pharmaceutical data center, where it is saved, evaluated, and made available to relevant stakeholders or users. Fog layers have been utilized to increase the scalability and flexibility of IoT-based healthcare services, by providing quick response times and low latency. Our proposed solution focuses on an electronic healthcare system that manages both critical and non-critical patients simultaneously. The fog layer is divided into two parts: a critical fog cluster and a non-critical fog cluster. Critical patients are handled at the critical fog cluster for quick response, while non-critical patients are handled using blockchain technology at the non-critical fog cluster, which protects the privacy of patient health records. The suggested solution requires little modification to the current IoT ecosystem while decreasing the response time for critical messages and offloading the cloud infrastructure. Reduced storage requirements for cloud data centers benefit users in addition to saving money on construction and operating expenses. In addition, we examined the proposed work for recall, accuracy, precision, and F-score.
The results show that the suggested approach is successful in protecting privacy while retaining standard network settings. Moreover, the suggested system and a benchmark are evaluated in terms of system response time, drop rate, throughput, and fog and cloud utilization. The evaluation results clearly indicate that the performance of the proposed system is better than that of the benchmark.}, } @article {pmid36156947, year = {2022}, author = {Yue, Q}, title = {Dynamic Database Design of Sports Quality Based on Genetic Data Algorithm and Artificial Intelligence.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {7473109}, pmid = {36156947}, issn = {1687-5273}, mesh = {*Artificial Intelligence ; Exercise ; Humans ; *Sports ; Students ; Surveys and Questionnaires ; }, abstract = {Traditional data mining methods are no longer adequate for obtaining knowledge from databases, and knowledge mined in the past must be constantly updated. In the last few years, Internet technology and cloud computing have emerged, and these two technologies have brought sweeping changes to certain industries. To efficiently retrieve and analyze large amounts of data at lower cost, big data technology was proposed. Big data technology has played an important role for data of various types, in huge quantities, and with extremely fast change rates. However, big data technology still has some limitations, and researchers still cannot obtain the value of data in a short period of time at low cost and with high efficiency. The sports database constructed in this paper can effectively support statistics and analysis of sports learning data. In the prototype system, log files can be mined, classified, and preprocessed. For the incremental data obtained by preprocessing, incremental data mining can be performed, a classification model can be established, and the database can be updated to provide users with personalized services. Through a survey, the author studied students' exercise status; the feedback data show that college students lack awareness of physical exercise and have no fitness habit. It is necessary to accelerate the reform of college sports and cultivate students' good sports awareness.}, } @article {pmid36156946, year = {2022}, author = {Zhu, J}, title = {The Usage of Designing the Urban Sculpture Scene Based on Edge Computing.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {9346771}, pmid = {36156946}, issn = {1687-5273}, mesh = {*Algorithms ; *Computers ; Electrocardiography ; Humans ; }, abstract = {To achieve the goal of urban cultural construction while saving the cost of urban sculpture space design, edge computing (EC) is first combined with urban sculpture space design and planning. The paper then briefly discusses the service category, system architecture, advantages, and characteristics of urban sculpture, as well as the key points and difficulties of its construction, and proposes a layered EC architecture for urban sculpture spaces. Secondly, cloud-edge combination technology is adopted, and the urban sculpture is treated as a specific functional node of the edge system and analyzed in depth to build an urban sculpture safety supervision system architecture platform.
Finally, the actual energy required for implementation is predicted and evaluated, the specific coverage of the monitoring system is set up, and equations are formulated for calculating the energy consumption of the monitored machines according to the number of devices and the route planning required by the urban sculpture safety supervision system. An energy consumption optimization algorithm based on reinforcement learning is proposed and compared with three control groups. The results show that when the seven monitoring devices cover fewer than 800 detection points, the required energy consumption increases linearly. When the devices cover more than 800 detection points, the required energy consumption is stable, varying from 10000 to 12000; that is, when the number of monitoring devices is 7, the optimal number of monitoring points is about 800. When the number of detection points is fixed, increasing the number of monitoring devices within a small range can reduce the total energy consumption. The proposed reinforcement-learning-based optimization algorithm can obtain an approximately optimal solution. The research results show that the combination of edge computing and urban sculpture can expand the function of urban sculpture and make it serve people better.}, } @article {pmid36156942, year = {2022}, author = {Zheng, M and Liu, B and Sun, L}, title = {LawRec: Automatic Recommendation of Legal Provisions Based on Legal Text Analysis.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {6313161}, pmid = {36156942}, issn = {1687-5273}, mesh = {*Artificial Intelligence ; Humans ; *Neural Networks, Computer ; Technology ; }, abstract = {Smart court technologies make full use of modern science, such as artificial intelligence, the Internet of Things, and cloud computing, to promote the modernization of the trial system and trial capabilities. Smart court technologies can improve the efficiency of case handling and provide convenience for the people. Article recommendation is an important part of intelligent trials. For ordinary people without a legal background, traditional information retrieval systems that search laws and regulations by keyword are not suitable, because such users cannot extract professional legal vocabulary from complex case descriptions. This paper proposes a law recommendation framework, called LawRec, based on Bidirectional Encoder Representation from Transformers (BERT) and Skip-Recurrent Neural Network (Skip-RNN) models. It intends to integrate the knowledge of legal provisions with case descriptions and uses the BERT model to learn the case description text and legal knowledge, respectively. Finally, laws and regulations for cases can be recommended. Experiment results show that the proposed LawRec achieves better performance than state-of-the-art methods.}, } @article {pmid36153857, year = {2022}, author = {Park, JY and Lee, K and Chung, DR}, title = {Public interest in the digital transformation accelerated by the COVID-19 pandemic and perception of its future impact.}, journal = {The Korean journal of internal medicine}, volume = {37}, number = {6}, pages = {1223-1233}, pmid = {36153857}, issn = {2005-6648}, mesh = {Humans ; Pandemics ; *COVID-19/epidemiology ; Artificial Intelligence ; *Virtual Reality ; Perception ; }, abstract = {BACKGROUND/AIMS: The coronavirus disease 2019 (COVID-19) pandemic has accelerated digital transformation (DT).
We investigated trends in public interest in DT-related technologies, along with Koreans' experiences of these technologies and their perceptions of the technologies' future impact.

METHODS: Using Google Trends, the relative search volumes (RSVs) for topics including "coronavirus," "artificial intelligence," "cloud," "big data," and "metaverse" were retrieved for the period from January 2020 to January 2022. A survey was conducted to assess the population's knowledge, experience, and perceptions regarding the DT.
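For readers who want to script such retrievals rather than use the Google Trends website, the unofficial pytrends wrapper offers a similar query; the sketch below mirrors the keywords and period from the METHODS, though it is an assumption that pytrends reproduces the study's exact RSVs.

```python
# Hedged sketch: relative search volumes via the unofficial pytrends wrapper
# (the study itself used the Google Trends site; geo="KR" targets Korea).
from pytrends.request import TrendReq

pytrends = TrendReq(hl="en-US")
kw_list = ["coronavirus", "artificial intelligence", "cloud",
           "big data", "metaverse"]          # Google Trends allows up to 5
pytrends.build_payload(kw_list, timeframe="2020-01-01 2022-01-31", geo="KR")
rsv = pytrends.interest_over_time()          # DataFrame of RSVs scaled 0-100
print(rsv.tail())
```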

RESULTS: The RSV for "metaverse" showed an increasing trend, in contrast to those for "cloud," "big data," and "coronavirus." The RSVs for DT-related keywords had a negative correlation with the number of new weekly COVID-19 cases. In our survey, 78.1% responded that the positive impact of the DT on future lives would outweigh the negative impact. The predictors for this positive perception included experiences with the metaverse (4.0-fold) and virtual reality (VR)/augmented reality (AR) education (3.8-fold). Respondents predicted that the biggest change would occur in the healthcare sector after transportation/communication.

CONCLUSION: Koreans' search interest for "metaverse" showed an increasing trend during the COVID-19 pandemic. Koreans believe that DT will bring about big changes in the healthcare sector. Most of the survey respondents have a positive outlook about the impact of DT on future life, and the predictors for this positive perception include experience with the metaverse or VR/AR education. Healthcare professionals need to accelerate the adoption of DT in clinical practice, education, and training.}, } @article {pmid36151775, year = {2022}, author = {Zhao, XG and Cao, H}, title = {Linking research of biomedical datasets.}, journal = {Briefings in bioinformatics}, volume = {23}, number = {6}, pages = {}, doi = {10.1093/bib/bbac373}, pmid = {36151775}, issn = {1477-4054}, support = {2018YFD0901103//Key Research and Development Program of the Ministry of Science and Technology/ ; }, mesh = {Humans ; *Ecosystem ; *Algorithms ; Knowledge ; }, abstract = {Biomedical data preprocessing and efficient computing can be as important as the statistical methods used to fit the data; data processing needs to consider application scenarios, data acquisition and individual rights and interests. We review common principles, knowledge and methods of integrated research according to a whole-pipeline processing mechanism that is diverse, coherent, sharing, auditable and ecological. First, neuromorphic and native algorithms integrate diverse datasets, providing linear scalability and high visualization. Second, the choice mechanism of different preprocessing, analysis and transaction methods from raw to neuromorphic was summarized on the node and coordinator platforms. Third, the combination of node, network, cloud, edge, swarm and graph builds an ecosystem of cohort integrated research and clinical diagnosis and treatment. Looking forward, it is vital to simultaneously combine deep computing, mass data storage and massively parallel communication.}, } @article {pmid36146408, year = {2022}, author = {Jeong, Y and Kim, T}, title = {A Cluster-Driven Adaptive Training Approach for Federated Learning.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {18}, pages = {}, pmid = {36146408}, issn = {1424-8220}, mesh = {*Algorithms ; Humans ; *Learning ; Machine Learning ; }, abstract = {Federated learning (FL) is a promising collaborative learning approach in edge computing, reducing communication costs and addressing the data privacy concerns of traditional cloud-based training. Owing to this, diverse studies have been conducted to bring FL into industry. However, practical issues of FL (e.g., handling non-IID data and stragglers) still remain to be solved for an actual implementation. To address these issues, in this paper, we propose a cluster-driven adaptive training approach (CATA-Fed) to enhance the performance of FL training in a practical environment. CATA-Fed employs adaptive training during the local model updates to enhance the efficiency of training, reducing the waste of time and resources due to stragglers, and also provides a straggler mitigation scheme, which can reduce the workload of straggling clients. In addition, CATA-Fed clusters the clients considering the data size and selects the training participants within a cluster to reduce the magnitude differences of local gradients collected in the global model update under a statistical heterogeneous condition (e.g., non-IID data).
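As a baseline reference for the aggregation step that CATA-Fed builds on, a minimal FedAvg-style weighted average can be sketched in a few lines of NumPy; the client weights and data sizes are toy values, and CATA-Fed's clustering, adaptive training, and straggler handling are deliberately not reproduced here.

```python
# Minimal FedAvg-style aggregation sketch (generic baseline, not CATA-Fed).
import numpy as np

def fedavg(client_weights, client_sizes):
    """Average per-client model weights, weighted by local data size."""
    total = sum(client_sizes)
    agg = [np.zeros_like(w) for w in client_weights[0]]
    for weights, n in zip(client_weights, client_sizes):
        for i, w in enumerate(weights):
            agg[i] += (n / total) * w
    return agg

# Three toy clients, each holding a [matrix, bias] model; unequal data sizes
# stand in for the statistical heterogeneity (non-IID data) discussed above.
rng = np.random.default_rng(0)
clients = [[rng.normal(size=(4, 4)), rng.normal(size=4)] for _ in range(3)]
sizes = [100, 400, 1500]
global_model = fedavg(clients, sizes)
print(global_model[0].shape, global_model[1].shape)   # (4, 4) (4,)
```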
During this client selection process, a proportional fair scheduling is employed for securing the data diversity as well as balancing the load of clients. We conduct extensive experiments using three benchmark datasets (MNIST, Fashion-MNIST, and CIFAR-10), and the results show that CATA-Fed outperforms the previous FL schemes (FedAVG, FedProx, and TiFL) with regard to the training speed and test accuracy under the diverse FL conditions.}, } @article {pmid36146382, year = {2022}, author = {Caro-Via, S and Vidaña-Vila, E and Ginovart-Panisello, GJ and Martínez-Suquía, C and Freixes, M and Alsina-Pagès, RM}, title = {Edge-Computing Meshed Wireless Acoustic Sensor Network for Indoor Sound Monitoring.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {18}, pages = {}, pmid = {36146382}, issn = {1424-8220}, mesh = {*Acoustics ; Humans ; Monitoring, Physiologic ; }, abstract = {This work presents the design of a wireless acoustic sensor network (WASN) that monitors indoor spaces. The proposed network would enable the acquisition of valuable information on the behavior of the inhabitants of the space. This WASN has been conceived to work in any type of indoor environment, including houses, hospitals, universities or even libraries, where the tracking of people can give relevant insight, with a focus on ambient assisted living environments. The proposed WASN has several priorities and differences compared to the literature: (i) presenting a low-cost flexible sensor able to monitor wide indoor areas; (ii) balance between acoustic quality and microphone cost; and (iii) good communication between nodes to increase the connectivity coverage. A potential application of the proposed network could be the generation of a sound map of a certain location (house, university, offices, etc.) or, in the future, the acoustic detection of events, giving information about the behavior of the inhabitants of the place under study. Each node of the network comprises an omnidirectional microphone and a computation unit, which processes acoustic information locally following the edge-computing paradigm to avoid sending raw data to a cloud server, mainly for privacy and connectivity purposes. Moreover, this work explores the placement of acoustic sensors in a real scenario, following acoustic coverage criteria. The proposed network aims to encourage the use of real-time non-invasive devices to obtain behavioral and environmental information, in order to take decisions in real-time with the minimum intrusiveness in the location under study.}, } @article {pmid36146368, year = {2022}, author = {Barron, A and Sanchez-Gallegos, DD and Carrizales-Espinoza, D and Gonzalez-Compean, JL and Morales-Sandoval, M}, title = {On the Efficient Delivery and Storage of IoT Data in Edge-Fog-Cloud Environments.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {18}, pages = {}, pmid = {36146368}, issn = {1424-8220}, support = {41756 PRONACES-CONACYT//Consejo Nacional de Ciencia y Tecnología/ ; }, mesh = {*Cloud Computing ; *Computer Communication Networks ; Electrocardiography ; Internet ; }, abstract = {Cloud storage has become a keystone for organizations to manage large volumes of data produced by sensors at the edge as well as information produced by deep and machine learning applications. Nevertheless, the latency produced by geographic distributed systems deployed on any of the edge, the fog, or the cloud, leads to delays that are observed by end-users in the form of high response times. 
In this paper, we present an efficient scheme for the management and storage of Internet of Things (IoT) data in edge-fog-cloud environments. In our proposal, entities called data containers are coupled, in a logical manner, with nano/microservices deployed on any of the edge, the fog, or the cloud. The data containers implement a hierarchical cache file system including storage levels such as in-memory, file system, and cloud services for transparently managing the input/output data operations produced by nano/microservices (e.g., a sensor hub collecting data from sensors at the edge or machine learning applications processing data at the edge). Data containers are interconnected through a secure and efficient content delivery network, which transparently and automatically performs the continuous delivery of data through the edge-fog-cloud. A prototype of our proposed scheme was implemented and evaluated in a case study based on the management of electrocardiogram sensor data. The obtained results reveal the suitability and efficiency of the proposed scheme.}, } @article {pmid36146364, year = {2022}, author = {Alvear-Puertas, VE and Burbano-Prado, YA and Rosero-Montalvo, PD and Tözün, P and Marcillo, F and Hernandez, W}, title = {Smart and Portable Air-Quality Monitoring IoT Low-Cost Devices in Ibarra City, Ecuador.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {18}, pages = {}, pmid = {36146364}, issn = {1424-8220}, support = {Research project: IEA.WHP.21.02//Universidad de Las Américas/ ; Research project: CEPRA XII-2018-13//Corporacion Ecuatoriana para el Desarrollo de la Investigacion y la Academia/ ; award ref: NNF20OC0064411//Novo Nordisk Fonden/ ; }, mesh = {*Air Pollution/analysis ; Ecuador ; Environmental Monitoring/methods ; Gases/analysis ; *Internet of Things ; }, abstract = {Nowadays, increasing air-pollution levels are a public health concern that affects all living beings, with the most polluting gases being present in urban environments. For this reason, this research presents portable Internet of Things (IoT) environmental monitoring devices that can be installed in vehicles and that send message queuing telemetry transport (MQTT) messages to a server, with a time series database allocated in edge computing. The visualization stage is performed in cloud computing to determine the city air-pollution concentration using three different labels: low, normal, and high. To determine the environmental conditions in Ibarra, Ecuador, a data analysis scheme is used with outlier detection and supervised classification stages. In terms of relevant results, the performance percentage of the IoT nodes used to infer air quality was greater than 90%.
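The hierarchical cache file system that the data-container entry above describes for its edge-fog-cloud containers can be approximated by a small two-tier sketch. The class name, the LRU policy, and the plain-file backing store are assumptions; the cloud tier is left as a stub.

```python
# Two-tier cache sketch (in-memory LRU backed by the local file system),
# loosely modeled on the hierarchical data containers described above.
import os
from collections import OrderedDict

class TieredCache:
    def __init__(self, root, mem_capacity=128):
        self.mem = OrderedDict()        # in-memory tier, kept in LRU order
        self.mem_capacity = mem_capacity
        self.root = root
        os.makedirs(root, exist_ok=True)

    def _path(self, key):
        return os.path.join(self.root, key)   # assumes filename-safe keys

    def put(self, key, data):
        self.mem[key] = data
        self.mem.move_to_end(key)
        if len(self.mem) > self.mem_capacity:      # demote LRU item to disk
            old_key, old_data = self.mem.popitem(last=False)
            with open(self._path(old_key), "wb") as f:
                f.write(old_data)

    def get(self, key):
        if key in self.mem:                        # memory hit
            self.mem.move_to_end(key)
            return self.mem[key]
        if os.path.exists(self._path(key)):        # file-system hit; promote
            with open(self._path(key), "rb") as f:
                data = f.read()
            self.put(key, data)
            return data
        return None   # a full implementation would fall through to the cloud tier
```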
In addition, memory consumption was 14 KB of flash and 3 KB of RAM, reducing the power consumption and bandwidth needed relative to traditional air-pollution measuring stations.}, } @article {pmid36146329, year = {2022}, author = {Maruta, K and Nishiuchi, H and Nakazato, J and Tran, GK and Sakaguchi, K}, title = {5G/B5G mmWave Cellular Networks with MEC Prefetching Based on User Context Information.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {18}, pages = {}, pmid = {36146329}, issn = {1424-8220}, support = {723171//European Commission/ ; 0159-0048//Ministry of Internal Affairs and Communications/ ; 0155-0062//Ministry of Internal Affairs and Communications/ ; 00101//National Institute of Information and Communications Technology/ ; }, abstract = {To deal with recently increasing mobile traffic, ultra-broadband communication with millimeter-wave (mmWave) has been regarded as a key technology for 5G cellular networks. In a previous study, a mmWave heterogeneous network was composed of several mmWave small cells overlaid on the coverage of a macro cell. However, as worldwide optical fiber penetration rates show, Gbps-order backhaul is not available everywhere. When mmWave access is used under limited backhaul capacity, the backhaul becomes a bottleneck; thus, mmWave access cannot fully demonstrate its potential. On the other hand, the concept of multi-access edge computing (MEC) has been proposed to decrease the response latency compared to cloud computing by deploying storage and computation resources to the user side of mobile networks. This paper introduces MEC into mmWave heterogeneous networks and proposes a content prefetching algorithm to resolve such backhaul issues. Context information, such as destination, mobility, and traffic tendency, is shared through the macro cell to prefetch the applications and data that users request. Prefetched data is stored in the MEC and then transmitted via mmWave without a backhaul bottleneck. The effectiveness is verified through computer simulations where we implement realistic user mobility as well as traffic and backhauling models. The results show that the proposed framework achieved 95% system capacity even under the constraint of a 1 Gbps backhaul link.}, } @article {pmid36146134, year = {2022}, author = {Alghamdi, A and Zhu, J and Yin, G and Shorfuzzaman, M and Alsufyani, N and Alyami, S and Biswas, S}, title = {Blockchain Empowered Federated Learning Ecosystem for Securing Consumer IoT Features Analysis.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {18}, pages = {}, pmid = {36146134}, issn = {1424-8220}, mesh = {*Blockchain ; Computer Security ; Ecosystem ; *Internet of Things ; Privacy ; }, abstract = {Resource-constrained Consumer Internet of Things (CIoT) devices are controlled through gateway devices (e.g., smartphones, computers, etc.) that are connected to Mobile Edge Computing (MEC) servers or a cloud regulated by a third party. Recently, Machine Learning (ML) has been widely used in automation, consumer behavior analysis, device quality upgradation, etc. Typical ML predicts by analyzing customers' raw data in a centralized system, which raises security and privacy issues such as data leakage, privacy violation, single point of failure, etc. To overcome these problems, Federated Learning (FL) emerged as an initial solution to provide services without sharing personal data.
In FL, a centralized aggregator averages the collected local updates into a global model used for the next round of training. However, the centralized aggregator raises similar issues, since a single point of control can leak the updated model or interrupt the entire process. Additionally, research has shown that data can be retrieved from model parameters. Beyond that, since the Gateway (GW) device has full access to the raw data, it can also threaten the entire ecosystem. This research contributes a blockchain-controlled, edge intelligence federated learning framework for a distributed learning platform for CIoT. The federated learning platform allows collaborative learning with users' shared data, and the blockchain network replaces the centralized aggregator and ensures secure participation of gateway devices in the ecosystem. Furthermore, blockchain is trustless, immutable, and anonymous, encouraging CIoT end users to participate. We evaluated the framework and federated learning outcomes using the well-known Stanford Cars dataset. Experimental results prove the effectiveness of the proposed framework.}, } @article {pmid36146113, year = {2022}, author = {Liu, X and Zhao, X and Liu, G and Huang, F and Huang, T and Wu, Y}, title = {Collaborative Task Offloading and Service Caching Strategy for Mobile Edge Computing.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {18}, pages = {}, pmid = {36146113}, issn = {1424-8220}, support = {5700-202141454A-0-0-00//the 2021 State Grid Corporation of China Science and Technology Program/ ; }, mesh = {*Algorithms ; Computer Simulation ; }, abstract = {Mobile edge computing (MEC), which sinks the functions of cloud servers, has become an emerging paradigm to solve the contradiction between delay-sensitive tasks and resource-constrained terminals. Task offloading assisted by service caching in a collaborative manner can reduce delay and balance the edge load in MEC. Due to the limited storage resources of edge servers, it is a significant issue to develop a dynamic service caching strategy according to actual, variable user demands in task offloading. Therefore, this paper investigates the collaborative task offloading problem assisted by a dynamic caching strategy in MEC. Furthermore, a two-level computing strategy called joint task offloading and service caching (JTOSC) is proposed to solve the optimization problem. The outer layer in JTOSC iteratively updates the service caching decisions based on Gibbs sampling. The inner layer in JTOSC adopts the fairness-aware allocation algorithm and the offloading revenue preference-based bilateral matching algorithm to obtain a good computing resource allocation and task offloading scheme. The simulation results indicate that the proposed strategy outperforms the other four comparison strategies in terms of maximum offloading delay, service cache hit rate, and edge load balance.}, } @article {pmid36146069, year = {2022}, author = {Li, D and Mao, Y and Chen, X and Li, J and Liu, S}, title = {Deployment and Allocation Strategy for MEC Nodes in Complex Multi-Terminal Scenarios.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {18}, pages = {}, pmid = {36146069}, issn = {1424-8220}, support = {2018YFB2100100//Yunnan Power Grid Co./ ; }, abstract = {Mobile edge computing (MEC) has become an effective solution to the insufficient computing and communication capabilities available to Internet of Things (IoT) applications, thanks to its rich computing resources on the edge side.
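JTOSC's outer layer, as summarized above, updates caching decisions with Gibbs sampling. A generic version of that idea — sampling each binary caching decision from its conditional distribution under cost-based Boltzmann weights — looks roughly like the sketch below; the cost function, temperature, and data are placeholder assumptions, not the paper's formulation.

```python
# Gibbs-sampling sketch over binary service-caching decisions, in the
# spirit of JTOSC's outer layer described above.
import numpy as np

def placeholder_cost(cache, demand, storage_penalty=0.5):
    """Assumed cost: demand left unserved locally plus a storage penalty."""
    return float(demand @ (1 - cache) + storage_penalty * cache.sum())

def gibbs_caching(n_services, demand, iters=2000, temp=0.1, seed=0):
    rng = np.random.default_rng(seed)
    cache = rng.integers(0, 2, size=n_services)
    for _ in range(iters):
        i = rng.integers(n_services)          # revisit one caching decision
        cost = np.empty(2)
        for v in (0, 1):                      # cost with and without caching i
            trial = cache.copy()
            trial[i] = v
            cost[v] = placeholder_cost(trial, demand)
        # Conditional probability of caching service i (Boltzmann weights).
        p1 = 1.0 / (1.0 + np.exp((cost[1] - cost[0]) / temp))
        cache[i] = int(rng.random() < p1)
    return cache

demand = np.random.default_rng(1).random(8)   # toy per-service demand
print(gibbs_caching(8, demand))
```

Lowering the temperature concentrates the stationary distribution on low-cost caching configurations, which is why Gibbs updates of this kind act as a randomized search.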
In multi-terminal scenarios, the deployment scheme of edge nodes has an important impact on system performance and has become an essential issue in end-edge-cloud architecture. In this article, we consider specific factors, such as spatial location, power supply, and urgency requirements of terminals, with respect to building an evaluation model to solve the allocation problem. An evaluation model based on reward, energy consumption, and cost factors is proposed. The genetic algorithm is applied to determine the optimal edge node deployment and allocation strategies. Moreover, we compare the proposed method with the k-means and ant colony algorithms. The results show that the obtained strategies achieve good evaluation results under problem constraints. Furthermore, we conduct comparison tests with different attributes to further test the performance of the proposed method.}, } @article {pmid36141163, year = {2022}, author = {Tang, X and Xu, L and Chen, G}, title = {Research on the Rapid Diagnostic Method of Rolling Bearing Fault Based on Cloud-Edge Collaboration.}, journal = {Entropy (Basel, Switzerland)}, volume = {24}, number = {9}, pages = {}, pmid = {36141163}, issn = {1099-4300}, support = {QKHJC-ZK〔2021〕YB271//The Science and Technology Foundation of Guizhou Province/ ; QKHZC〔2022〕YB074//Guizhou Science and Technology Support Project/ ; }, abstract = {Recent deep-learning methods for fault diagnosis of rolling bearings need a significant amount of computing time and resources. Most of them cannot meet the requirements of real-time fault diagnosis of rolling bearings under the cloud computing framework. This paper proposes a quick cloud-edge collaborative bearing fault diagnostic method based on the tradeoff between the advantages and disadvantages of cloud and edge computing. First, a collaborative cloud-based framework and an improved DSCNN-GAP algorithm are suggested to build a general model using the public bearing fault dataset. Second, the general model is distributed to each edge node, and a limited number of unique fault samples acquired by each edge node are used to quickly adjust the parameters of the model before running diagnostic tests. Finally, a fusion result is made from the diagnostic results of each edge node by DS evidence theory. Experimental results show that the proposed method not only improves diagnostic accuracy through DSCNN-GAP and multi-sensor fusion, but also decreases diagnosis time through transfer learning with the cloud-edge collaborative framework. Additionally, the method can effectively enhance data security and privacy protection.}, } @article {pmid36124594, year = {2022}, author = {Lin, HY and Tsai, TT and Wu, HR and Ku, MS}, title = {Secure access control using updateable attribute keys.}, journal = {Mathematical biosciences and engineering : MBE}, volume = {19}, number = {11}, pages = {11367-11379}, doi = {10.3934/mbe.2022529}, pmid = {36124594}, issn = {1551-0018}, mesh = {*Algorithms ; Cloud Computing ; *Computer Security ; Confidentiality ; Humans ; }, abstract = {In the era of cloud computing, the technique of access control is vital to protect the confidentiality and integrity of cloud data. From the perspective of servers, they should only allow authenticated clients to gain access to data. Specifically, the server will share a communication channel with the client by generating a common session key. It is thus regarded as a symmetric key for encrypting data in the current channel.
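The genetic algorithm that the MEC deployment entry above applies to node placement can be illustrated with a toy version in which chromosomes are sets of candidate sites and fitness is the (negated) mean terminal-to-node distance. The fitness function, operators, and data here are assumptions for illustration, not the paper's evaluation model.

```python
# Toy genetic algorithm for choosing K MEC sites out of N candidates,
# echoing the deployment/allocation study above.
import numpy as np

rng = np.random.default_rng(0)
terminals = rng.random((40, 2))     # terminal coordinates (toy data)
sites = rng.random((10, 2))         # candidate MEC node sites
K = 3                               # number of nodes to deploy

def fitness(chrom):
    """Negative mean distance from each terminal to its nearest deployed node."""
    d = np.linalg.norm(terminals[:, None, :] - sites[chrom][None, :, :], axis=2)
    return -d.min(axis=1).mean()

def crossover(a, b):
    """Child draws K distinct sites from the union of both parents."""
    return rng.choice(np.union1d(a, b), size=K, replace=False)

def mutate(child):
    """Swap one selected site for a random unused one."""
    child = child.copy()
    child[rng.integers(K)] = rng.choice(np.setdiff1d(np.arange(len(sites)), child))
    return child

pop = [rng.choice(len(sites), size=K, replace=False) for _ in range(30)]
for _ in range(100):
    pop.sort(key=fitness, reverse=True)
    elite = pop[:15]                               # truncation selection
    pop = elite + [mutate(crossover(elite[rng.integers(15)],
                                    elite[rng.integers(15)]))
                   for _ in range(15)]
best = max(pop, key=fitness)
```

A fuller evaluation model would replace the distance-only fitness with the reward, energy-consumption, and cost terms the abstract mentions.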
An access control mechanism using attribute-based encryption is most flexible, since the decryption privilege can be granted to those who have sufficient attributes. In the paper, the authors propose a secure access control consisting of attribute-based mutual authentication and attribute-based encryption. The most appealing property of our system is that the attribute keys associated with each user are periodically updatable. Moreover, we will also show that our system fulfills fuzzy selective-ID security assuming the hardness of the Decisional Modified Bilinear Diffie-Hellman (DMBDH) problem.}, } @article {pmid36124579, year = {2022}, author = {Liu, D and Li, Z and Wang, C and Ren, Y}, title = {Enabling secure mutual authentication and storage checking in cloud-assisted IoT.}, journal = {Mathematical biosciences and engineering : MBE}, volume = {19}, number = {11}, pages = {11034-11046}, doi = {10.3934/mbe.2022514}, pmid = {36124579}, issn = {1551-0018}, abstract = {The Internet of things (IoT) is a technology that can collect data sensed by devices for further real-time services. Using cloud computing to assist IoT devices with data storage can eliminate the disadvantage of constrained local storage and computing capability. However, the complex network environment makes cloud servers vulnerable to attacks, and adversaries may pretend to be legal IoT clients trying to access the cloud server. Hence, it is necessary to provide a mechanism of mutual authentication for the cloud system to enhance storage security. In this paper, a secure mutual authentication is proposed for cloud-assisted IoT. Note that the technique of chameleon hash signature is used to construct the authentication. Moreover, the proposed scheme can provide storage checking with the assistance of a fully trusted entity, which highly improves the checking fairness and efficiency. Security analysis proves that the proposed scheme is correct. Performance analysis demonstrates that the proposed scheme can be performed with high efficiency.}, } @article {pmid36124116, year = {2022}, author = {Wu, Y and Zheng, C and Xie, L and Hao, M}, title = {Cloud-Based English Multimedia for Universities Test Questions Modeling and Applications.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {4563491}, pmid = {36124116}, issn = {1687-5273}, mesh = {*Cloud Computing ; Computers ; Humans ; *Multimedia ; Software ; Universities ; }, abstract = {This study constructs a cloud computing-based college English multimedia test question modeling and application through an in-depth study of cloud computing and college English multimedia test questions. The emergence of cloud computing technology undoubtedly provides a new and ideal method to solve test data and paper management problems. This study analyzes the advantages of the Hadoop computing platform and the MapReduce computing model and builds a distributed computing platform based on Hadoop using universities' existing hardware and software resources. The UML model of the system is given, the system is implemented and functionally tested, and the results of the analysis are given. Multimedia is the critical link to realizing the optimization of English test questions.
The proper use of multimedia test questions will undoubtedly become an inevitable trend in the development of English test questions. This requires educators to continuously analyze and study the problems arising from multimedia teaching, summarize its lessons, and explore new methods, so that multimedia teaching can better promote the optimization of English test questions in colleges and universities and better serve education.}, } @article {pmid36118826, year = {2022}, author = {Zhang, F and Zhang, Z and Xiao, H}, title = {Research on Medical Big Data Analysis and Disease Prediction Method Based on Artificial Intelligence.}, journal = {Computational and mathematical methods in medicine}, volume = {2022}, number = {}, pages = {4224287}, pmid = {36118826}, issn = {1748-6718}, mesh = {*Artificial Intelligence ; *Big Data ; Cloud Computing ; Data Analysis ; Humans ; }, abstract = {In recent years, the continuous development of big data, cloud services, Internet+, artificial intelligence, and other technologies has accelerated the improvement of data communication services in the traditional pharmaceutical industry. It plays a leading role in the development of China's pharmaceutical industry, deepening the reform of the health system, improving the efficiency and quality of medical services, and developing new technologies. In this context, we conducted the following research and drew the following conclusions: (1) The scale of China's medical big data market is constantly increasing, as is the global market, but China's market has grown at a faster rate: its share rose from 10.33% in 2015 to 38.7% seven years later, an increase of 28.37 percentage points. (2) Generally speaking, urine is mainly slightly acidic (pH around 6.0; the normal range is 5.0 to 7.0), though it can also be neutral or slightly alkaline; values of 7.5 to 8 are generally found in people with some physical problems. In recent years, the pharmaceutical industry has continuously developed technologies such as big data, cloud computing, Internet+, and artificial intelligence by improving data transmission services. As an important national strategic resource, medical big data is of great significance for developing China's pharmaceutical industry, deepening the reform of the national medical system, improving the efficiency and level of medical services, establishing new service forms, and accelerating economic growth.
It is in this sense that we set out to explore these developments.}, } @article {pmid36108415, year = {2022}, author = {Shoeibi, A and Moridian, P and Khodatars, M and Ghassemi, N and Jafari, M and Alizadehsani, R and Kong, Y and Gorriz, JM and Ramírez, J and Khosravi, A and Nahavandi, S and Acharya, UR}, title = {An overview of deep learning techniques for epileptic seizures detection and prediction based on neuroimaging modalities: Methods, challenges, and future works.}, journal = {Computers in biology and medicine}, volume = {149}, number = {}, pages = {106053}, doi = {10.1016/j.compbiomed.2022.106053}, pmid = {36108415}, issn = {1879-0534}, mesh = {Algorithms ; *Deep Learning ; Electroencephalography/methods ; *Epilepsy/diagnostic imaging ; Humans ; Neuroimaging ; Seizures/diagnostic imaging ; }, abstract = {Epilepsy is a disorder of the brain characterized by frequent seizures. The symptoms of seizure include confusion, abnormal staring, and rapid, sudden, and uncontrollable hand movements. Epileptic seizure detection methods involve neurological exams, blood tests, neuropsychological tests, and neuroimaging modalities. Among these, neuroimaging modalities have received considerable attention from specialist physicians. One method to facilitate the accurate and fast diagnosis of epileptic seizures is to employ computer-aided diagnosis systems (CADS) based on deep learning (DL) and neuroimaging modalities. This paper presents a comprehensive overview of DL methods employed for epileptic seizures detection and prediction using neuroimaging modalities. First, DL-based CADS for epileptic seizures detection and prediction using neuroimaging modalities are discussed. Also, descriptions of various datasets, preprocessing algorithms, and DL models which have been used for epileptic seizures detection and prediction have been included. Then, research on rehabilitation tools has been presented, which covers brain-computer interfaces (BCI), cloud computing, the internet of things (IoT), hardware implementation of DL techniques on field-programmable gate arrays (FPGA), etc. In the discussion section, a comparison has been carried out between research on epileptic seizure detection and prediction. The challenges in epileptic seizures detection and prediction using neuroimaging modalities and DL models have been described. In addition, possible directions for future works in this field, specifically for solving challenges in datasets, DL, rehabilitation, and hardware models, have been proposed.
The final section is dedicated to the conclusion, which summarizes the significant findings of the paper.}, } @article {pmid36107981, year = {2022}, author = {Kim, YK and Kim, HJ and Lee, H and Chang, JW}, title = {Correction: Privacy-preserving parallel kNN classification algorithm using index-based filtering in cloud computing.}, journal = {PloS one}, volume = {17}, number = {9}, pages = {e0274981}, pmid = {36107981}, issn = {1932-6203}, abstract = {[This corrects the article DOI: 10.1371/journal.pone.0267908.].}, } @article {pmid36107827, year = {2022}, author = {Zhuang, Y and Jiang, N}, title = {Progressive privacy-preserving batch retrieval of lung CT image sequences based on edge-cloud collaborative computation.}, journal = {PloS one}, volume = {17}, number = {9}, pages = {e0274507}, pmid = {36107827}, issn = {1932-6203}, mesh = {*Computer Security ; Lung/diagnostic imaging ; *Privacy ; Tomography, X-Ray Computed ; }, abstract = {BACKGROUND: A computed tomography image (CI) sequence can be regarded as time-series data composed of a great many adjacent and similar CIs. Since the computational and I/O costs of similarity measurement, encryption, and decryption during a similarity retrieval of large CI sequences (CISs) are extremely high, deploying all retrieval tasks in the cloud will lead to excessive computing load on the cloud, which will greatly and negatively affect retrieval performance.

METHODOLOGIES: To tackle the above challenges, the paper proposes a progressive privacy-preserving Batch Retrieval scheme for lung CISs based on edge-cloud collaborative computation, called the BRS method. Four supporting techniques enable the BRS method: 1) batch similarity measure for CISs, 2) a CIB-based privacy-preserving scheme, 3) a uniform edge-cloud index framework, and 4) edge buffering.

RESULTS: The experimental results reveal that our method outperforms the state-of-the-art approaches in terms of efficiency and scalability, drastically reducing response time by lowering network communication costs while enhancing retrieval safety and accuracy.}, } @article {pmid36105640, year = {2022}, author = {Veeraiah, D and Mohanty, R and Kundu, S and Dhabliya, D and Tiwari, M and Jamal, SS and Halifa, A}, title = {Detection of Malicious Cloud Bandwidth Consumption in Cloud Computing Using Machine Learning Techniques.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {4003403}, pmid = {36105640}, issn = {1687-5273}, mesh = {*Cloud Computing ; Fuzzy Logic ; Humans ; *Machine Learning ; }, abstract = {The Internet of Things (IoT) is a relatively new kind of Internet connectivity that connects physical objects to the Internet in a way that was not possible in the past. The IoT has a larger attack surface as a result of its hyperconnectivity and heterogeneity. In addition, since IoT devices are deployed in managed and uncontrolled contexts, it is conceivable for malicious actors to build new attacks that target these devices. As a result, the IoT requires self-protection security systems that are able to autonomously interpret attacks in IoT traffic and efficiently handle the attack scenario by triggering appropriate reactions at a pace that is faster than what is currently available. In order to fulfill this requirement, fog computing must be utilised, as it can integrate an intelligent self-protection mechanism into the distributed fog nodes. This allows the IoT application to be protected with the least amount of human intervention while also allowing for faster management of attack scenarios. Implementing a self-protection mechanism at fog nodes against malicious attacks is the primary objective of this research work. This mechanism should be able to detect and predict known attacks based on predefined attack patterns, predict novel attacks for which no patterns are predefined, and then choose the most appropriate response to neutralise the identified attack. A distributed Gaussian process regression is used at fog nodes to anticipate attack patterns that have not been established in the past, which allows for the prediction of new cyberattacks in an uncertain IoT environment at a speedier rate and with greater precision than prior techniques. It is able to effectively anticipate both low-rate and high-rate assaults in a more timely manner within the dispersed fog nodes, which enables a more accurate defence. In conclusion, a fog computing-based self-protection system is developed that uses fuzzy logic to choose the most appropriate reaction to detected or anticipated assaults, building on the suggested detection and prediction mechanisms.
The findings of the experimental investigation indicate that the proposed system identifies threats, lowers bandwidth usage, and thwarts assaults at a rate that is twenty-five percent faster than the cloud-based system implementation.}, } @article {pmid36103218, year = {2022}, author = {Huang, H and Aschettino, S and Lari, N and Lee, TH and Rosenberg, SS and Ng, X and Muthuri, S and Bakshi, A and Bishop, K and Ezzeldin, H}, title = {A Versatile and Scalable Platform That Streamlines Data Collection for Patient-Centered Studies: Usability and Feasibility Study.}, journal = {JMIR formative research}, volume = {6}, number = {9}, pages = {e38579}, pmid = {36103218}, issn = {2561-326X}, abstract = {BACKGROUND: The Food and Drug Administration Center for Biologics Evaluation and Research (CBER) established the Biologics Effectiveness and Safety (BEST) Initiative with several objectives, including the expansion and enhancement of CBER's access to fit-for-purpose data sources, analytics, tools, and infrastructures to improve the understanding of patient experiences with conditions related to CBER-regulated products. Owing to existing challenges in data collection, especially for rare disease research, CBER recognized the need for a comprehensive platform where study coordinators can engage with study participants and design and deploy studies while patients or caregivers could enroll, consent, and securely participate as well.

OBJECTIVE: This study aimed to increase awareness and describe the design, development, and novelty of the Survey of Health and Patient Experience (SHAPE) platform, its functionality and application, quality improvement efforts, open-source availability, and plans for enhancement.

METHODS: SHAPE is hosted in a Google Cloud environment and comprises 3 parts: the administrator application, participant app, and application programming interface. The administrator can build a study comprising a set of questionnaires and self-report entries through the app. Once the study is deployed, the participant can access the app, consent to the study, and complete its components. To build SHAPE to be scalable and flexible, we leveraged the open-source software development kit, Ionic Framework. This enabled the building and deploying of apps across platforms, including iOS, Android, and progressive web applications, from a single codebase by using standardized web technologies. SHAPE has been integrated with a leading Health Level 7 (HL7®) Fast Healthcare Interoperability Resources (FHIR®) application programming interface platform, 1upHealth, which allows participants to consent to 1-time data pull of their electronic health records. We used an agile-based process that engaged multiple stakeholders in SHAPE's design and development.

RESULTS: SHAPE allows study coordinators to plan, develop, and deploy questionnaires to obtain important end points directly from patients or caregivers. Electronic health record integration enables access to patient health records, which can validate and enhance the accuracy of data-capture methods. The administrator can then download the study data into HL7® FHIR®-formatted JSON files. In this paper, we illustrate how study coordinators can use SHAPE to design patient-centered studies. We demonstrate its broad applicability through a hypothetical type 1 diabetes cohort study and an ongoing pilot study on metachromatic leukodystrophy to implement best practices for designing a regulatory-grade natural history study for rare diseases.

CONCLUSIONS: SHAPE is an intuitive and comprehensive data-collection tool for a variety of clinical studies. Further customization of this versatile and scalable platform allows for multiple use cases. SHAPE can capture patient perspectives and clinical data, thereby providing regulators, clinicians, researchers, and patient advocacy organizations with data to inform drug development and improve patient outcomes.}, } @article {pmid36100587, year = {2022}, author = {Wang, C and Kon, WY and Ng, HJ and Lim, CC}, title = {Experimental symmetric private information retrieval with measurement-device-independent quantum network.}, journal = {Light, science & applications}, volume = {11}, number = {1}, pages = {268}, pmid = {36100587}, issn = {2047-7538}, abstract = {Secure information retrieval is an essential task in today's highly digitised society. In some applications, it may be necessary that user query's privacy and database content's security are enforced. For these settings, symmetric private information retrieval (SPIR) could be employed, but its implementation is known to be demanding, requiring a private key-exchange network as the base layer. Here, we report for the first time a realisation of provably-secure SPIR supported by a quantum-secure key-exchange network. The SPIR scheme looks at biometric security, offering secure retrieval of 582-byte fingerprint files from a database with 800 entries. Our experimental results clearly demonstrate the feasibility of SPIR with quantum secure communications, thereby opening up new possibilities in secure distributed data storage and cloud computing over the future Quantum Internet.}, } @article {pmid36093501, year = {2022}, author = {Ahamed Ahanger, T and Aldaej, A and Atiquzzaman, M and Ullah, I and Yousufudin, M}, title = {Distributed Blockchain-Based Platform for Unmanned Aerial Vehicles.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {4723124}, pmid = {36093501}, issn = {1687-5273}, mesh = {*Blockchain ; Computer Communication Networks ; Computer Security ; Delivery of Health Care ; Unmanned Aerial Devices ; }, abstract = {Internet of Things (IoT)-inspired drone environment is having a greater influence on daily lives in the form of drone-based smart electricity monitoring, traffic routing, and personal healthcare. However, communication between drones and ground control systems must be protected to avoid potential vulnerabilities and improve coordination among scattered UAVs in the IoT context. In the current paper, a distributed UAV scheme is proposed that uses blockchain technology and a network topology similar to the IoT and cloud server to secure communications during data collection and transmission and reduce the likelihood of attack by maliciously manipulated UAVs. As an alternative to relying on a traditional blockchain approach, a unique, safe, and lightweight blockchain architecture is proposed that reduces computing and storage requirements while keeping privacy and security advantages. In addition, a unique reputation-based consensus protocol is built to assure the dependability of the decentralized network. Numerous types of transactions are established to characterize diverse data access. 
To validate the presented blockchain-based distributed system, performance evaluations are conducted to estimate the statistical effectiveness in the form of temporal delay, packet flow efficacy, precision, specificity, sensitivity, and security efficiency.}, } @article {pmid36093500, year = {2022}, author = {Zhu, G and Li, X and Zheng, C and Wang, L}, title = {Multimedia Fusion Privacy Protection Algorithm Based on IoT Data Security under Network Regulations.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {3574812}, pmid = {36093500}, issn = {1687-5273}, mesh = {Algorithms ; Computer Security ; Data Collection ; *Multimedia ; *Privacy ; }, abstract = {This study provides an in-depth analysis and research on multimedia fusion privacy protection algorithms based on IoT data security in a network regulation environment. Aiming at the problem of collusion and conspiracy to deceive users in the process of outsourced computing and outsourced verification, a safe, reliable, and collusion-resistant scheme based on blockchain is studied for IoT outsourced data computing and public verification, with the help of distributed storage methods, where smart devices encrypt the collected data and upload them to the DHT for storage along with the results of this data given by the cloud server. After testing, the constructed model has a privacy-preserving budget value of 0.6 and the smallest information leakage ratio of multimedia fusion data based on IoT data security when the decision tree depth is 6. After using this model under this condition, the maximum value of the information leakage ratio of multimedia fusion data based on IoT data security is reduced from 0.0865 to 0.003, and the data security is significantly improved. In the consensus verification process, to reduce the consensus time and ensure the operating efficiency of the system, a consensus node selection algorithm is proposed, thereby reducing the time complexity of the consensus. Based on the smart grid application scenario, the security and performance of the proposed model are analyzed. This study proves the correctness of this scheme by using BAN logic and proves the security of this scheme under the stochastic prediction machine model. Finally, this study compares the security aspects and performance aspects of the scheme with some existing similar schemes and shows that the scheme is feasible under IoT.}, } @article {pmid36093488, year = {2022}, author = {Alyami, J and Sadad, T and Rehman, A and Almutairi, F and Saba, T and Bahaj, SA and Alkhurim, A}, title = {Cloud Computing-Based Framework for Breast Tumor Image Classification Using Fusion of AlexNet and GLCM Texture Features with Ensemble Multi-Kernel Support Vector Machine (MK-SVM).}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {7403302}, pmid = {36093488}, issn = {1687-5273}, mesh = {Aged ; *Breast Neoplasms/diagnostic imaging ; Cloud Computing ; Diagnosis, Computer-Assisted/methods ; Female ; Humans ; Image Processing, Computer-Assisted/methods ; *Support Vector Machine ; }, abstract = {Breast cancer is common among women all over the world. Early identification of breast cancer lowers death rates. However, it is difficult to determine whether these are cancerous or noncancerous lesions due to their inconsistencies in image appearance. Machine learning techniques are widely employed in imaging analysis as a diagnostic method for breast cancer classification. 
However, patients in remote areas cannot take advantage of such systems, as they are unavailable on the cloud. Thus, breast cancer detection for remote patients is indispensable, and it is only possible through cloud computing. The user is allowed to feed images into the cloud system, where they are further investigated through the computer-aided diagnosis (CAD) system. Such systems could also be used to track patients and older adults, especially those with disabilities, particularly in remote areas of developing countries that lack medical facilities and paramedic staff. In the proposed CAD system, a fusion of the AlexNet architecture and GLCM (gray-level cooccurrence matrix) features is used to extract distinguishable texture features from breast tissues. Finally, to attain higher precision, an ensemble of MK-SVM is used. For testing purposes, the proposed model is applied to the MIAS dataset, a commonly used breast image database, and achieved 96.26% accuracy.}, } @article {pmid36093280, year = {2022}, author = {Xie, Y and Zhang, K and Kou, H and Mokarram, MJ}, title = {Private anomaly detection of student health conditions based on wearable sensors in mobile cloud computing.}, journal = {Journal of cloud computing (Heidelberg, Germany)}, volume = {11}, number = {1}, pages = {38}, pmid = {36093280}, issn = {2192-113X}, abstract = {With the continuous spread of the COVID-19 virus, how to guarantee the healthy living of people, especially of students, who are of relatively weak physique, is becoming a key research issue of significant value. Specifically, precise recognition of anomalies in student health conditions is beneficial to the quick discovery of potential patients. However, there are so many students in each school that education managers cannot know the health conditions of students in real time and quickly and accurately recognize possible anomalies among them. Fortunately, the rapid development of mobile cloud computing technologies and wearable sensors has provided a promising way to monitor the real-time health conditions of students and find anomalies in a timely manner. However, two challenges are present in the above anomaly detection issue. First, the health data monitored by massive wearable sensors are often large in volume and updated frequently, which probably leads to high sensor-cloud transmission costs for anomaly detection. Second, the health data of students are often sensitive, which probably impedes the integration of health data in the cloud environment and may even render health data-based anomaly detection infeasible. In view of these challenges, we propose a time-efficient and privacy-aware anomaly detection solution for students with wearable sensors in a mobile cloud computing environment. At last, we validate the effectiveness and efficiency of our work via a set of simulated experiments.}, } @article {pmid36092002, year = {2022}, author = {Vadde, U and Kompalli, VS}, title = {Energy efficient service placement in fog computing.}, journal = {PeerJ. Computer science}, volume = {8}, number = {}, pages = {e1035}, pmid = {36092002}, issn = {2376-5992}, abstract = {The Internet of Things (IoT) concept has evolved into a slew of applications. To satisfy the requests of these applications, using cloud computing is troublesome because of the high latency caused by the distance between IoT devices and cloud resources.
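GLCM texture descriptors of the kind fused with AlexNet features in the breast-tumor entry above are available in scikit-image (the gray* spellings assume version 0.19 or later). The random patch below stands in for a real mammogram tile, and the downstream fusion with CNN embeddings is not shown.

```python
# Orientation-averaged GLCM texture features for one image patch.
import numpy as np
from skimage.feature import graycomatrix, graycoprops

patch = np.random.default_rng(0).integers(0, 256, (64, 64), dtype=np.uint8)

# Co-occurrence matrices at a one-pixel offset and four orientations.
glcm = graycomatrix(patch, distances=[1],
                    angles=[0, np.pi / 4, np.pi / 2, 3 * np.pi / 4],
                    levels=256, symmetric=True, normed=True)

# Small texture feature vector, averaged over the four orientations.
features = {prop: float(graycoprops(glcm, prop).mean())
            for prop in ("contrast", "homogeneity", "energy", "correlation")}
print(features)
```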
Fog computing has become promising with its geographically distributed infrastructure for providing resources using fog nodes near IoT devices, thereby reducing bandwidth usage and latency. The geographical distribution, heterogeneity and resource constraints of fog nodes introduce the key challenge of placing application modules/services in such a large-scale infrastructure. In this work, we propose an improved version of the JAYA approach for optimal placement of modules that minimizes the energy consumption of a fog landscape. We analyzed the performance in terms of energy consumption, network usage, delays and execution time. Using iFogSim, we ran simulations and observed that our approach reduces energy consumption by 31% on average compared to modern methods.}, } @article {pmid36091662, year = {2023}, author = {Singh, A and Chatterjee, K}, title = {Edge computing based secure health monitoring framework for electronic healthcare system.}, journal = {Cluster computing}, volume = {26}, number = {2}, pages = {1205-1220}, pmid = {36091662}, issn = {1386-7857}, abstract = {Nowadays, Smart Healthcare Systems (SHS) are frequently used by people for personal healthcare observations using various smart devices. The SHS uses IoT technology and cloud infrastructure for data capturing, transmitting it through smart devices, data storage, processing, and healthcare advice. Processing such a huge amount of data from numerous IoT devices in a short time is quite challenging. Thus, technological frameworks such as edge computing or fog computing can be used as a middle layer between cloud and user in SHS, reducing the response time for data processing at the lower (edge) level. But the Edge of Things (EoT) also suffers from security and privacy issues. A robust healthcare monitoring framework with secure data storage and access is needed. It will provide a quick response in case of the production of abnormal data and store/access the sensitive data securely. This paper proposes a Secure Framework based on the Edge of Things (SEoT) for Smart Healthcare Systems. This framework is mainly designed for real-time health monitoring, maintaining the security and confidentiality of the healthcare data in a controlled manner. This paper includes clustering approaches for analyzing bio-signal data for abnormality detection and Attribute-Based Encryption (ABE) for bio-signal data security and secure access. The experimental results of the proposed framework show improved performance, maintaining accuracy of up to 98.5% together with data security.}, } @article {pmid36091551, year = {2022}, author = {Guo, C and Li, H}, title = {Application of 5G network combined with AI robots in personalized nursing in China: A literature review.}, journal = {Frontiers in public health}, volume = {10}, number = {}, pages = {948303}, pmid = {36091551}, issn = {2296-2565}, mesh = {Artificial Intelligence ; China ; Delivery of Health Care ; Humans ; *Robotics ; *Telemedicine ; }, abstract = {The medical and healthcare industry is currently developing toward digitization. Attributed to the rapid development of advanced technologies such as the 5G network, cloud computing, artificial intelligence (AI), and big data, and their wide applications in the medical industry, the medical model is shifting into an intelligent one. By combining the 5G network with cloud healthcare platforms and AI, nursing robots can effectively improve the overall medical efficacy.
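The JAYA heuristic improved upon in the service-placement entry above has a compact canonical update: move each candidate toward the best solution and away from the worst. The sketch below applies that canonical rule to a toy continuous objective; mapping candidates to discrete module placements and an energy model, as the paper does, is left out.

```python
# Canonical JAYA update, shown on a toy objective (sphere function).
import numpy as np

def jaya(objective, dim=5, pop=20, iters=200, lo=-5.0, hi=5.0, seed=0):
    rng = np.random.default_rng(seed)
    X = rng.uniform(lo, hi, (pop, dim))
    f = np.apply_along_axis(objective, 1, X)
    for _ in range(iters):
        best, worst = X[f.argmin()], X[f.argmax()]
        r1, r2 = rng.random((2, pop, dim))
        # JAYA move: toward the best solution, away from the worst.
        Xn = np.clip(X + r1 * (best - np.abs(X)) - r2 * (worst - np.abs(X)), lo, hi)
        fn = np.apply_along_axis(objective, 1, Xn)
        better = fn < f                      # greedy acceptance
        X[better], f[better] = Xn[better], fn[better]
    return X[f.argmin()], float(f.min())

x_best, f_best = jaya(lambda v: float((v ** 2).sum()))
print(x_best, f_best)
```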
Meanwhile, patients can enjoy personalized medical services, the supply and sharing of medical and healthcare services are promoted, and the digital transformation of the healthcare industry is accelerated. In this paper, the application and practice of 5G network technology in the medical industry are introduced, including telecare, 5G first-aid remote medical service, and remote robot applications. Also, by combining the application characteristics of AI and the development requirements of smart healthcare, the overall planning, intelligence, and personalization of the 5G network in the medical industry, as well as opportunities and challenges of its application in the field of nursing, are discussed. This paper provides references for the development and application of 5G network technology in the field of medical service.}, } @article {pmid36086197, year = {2022}, author = {Amin, AB and Wang, S and David, U and Noh, Y}, title = {Applicability of Cloud Native-based Healthcare Monitoring Platform (CN-HMP) in Older Adult Facilities.}, journal = {Annual International Conference of the IEEE Engineering in Medicine and Biology Society. IEEE Engineering in Medicine and Biology Society. Annual International Conference}, volume = {2022}, number = {}, pages = {2684-2688}, doi = {10.1109/EMBC48229.2022.9871998}, pmid = {36086197}, issn = {2694-0604}, mesh = {Aged ; *Cloud Computing ; Computer Communication Networks ; *Delivery of Health Care ; Electrocardiography ; Health Facilities ; Humans ; }, abstract = {Over the past few decades, the world has faced a huge demographic change toward an aging population, which poses significant challenges to healthcare systems. The increasing older adult population, along with the current health workforce shortage, makes it a struggle for current facilities and personnel to meet demand. To tackle this situation, cloud computing is a fast-growing area in digital healthcare that allows setting up a modern distributed system environment capable of scaling to tens of thousands of self-healing multi-tenant nodes for healthcare applications. In addition, cloud-native architecture has recently drawn attention as an ideal structure for multi-node healthcare monitoring systems due to its high scalability, low latency, and rapid and stable maintainability. In this study, we propose a cloud native-based rapid, robust, and productive digital healthcare platform that allows managing and caring for large patient groups. To validate our platform, we simulated our Cloud Native-based Healthcare Monitoring Platform (CN-HMP) with a real-time setup and evaluated its performance in terms of request response time, data packet delivery, and end-to-end latency. It showed a response time of less than 0.1 ms in at least 92.5% of up to 3K requests and no data packet loss, with more than 28% of data packets experiencing no latency and only ≈ 0.6% experiencing the maximum latency (3 ms) over a 24-hour observation.
Clinical Relevance- This study and relevant experiment demonstrate the suitability of the CN-HMP to support providers and nurses for elderly patients healthcare with regular monitoring in older adult facilities.}, } @article {pmid36082003, year = {2021}, author = {Aghababaei, M and Ebrahimi, A and Naghipour, AA and Asadi, E and Verrelst, J}, title = {Vegetation Types Mapping Using Multi-Temporal Landsat Images in the Google Earth Engine Platform.}, journal = {Remote sensing}, volume = {13}, number = {22}, pages = {4683}, pmid = {36082003}, issn = {2072-4292}, support = {755617/ERC_/European Research Council/International ; }, abstract = {Vegetation Types (VTs) are important managerial units, and their identification serves as essential tools for the conservation of land covers. Despite a long history of Earth observation applications to assess and monitor land covers, the quantitative detection of sparse VTs remains problematic, especially in arid and semiarid areas. This research aimed to identify appropriate multi-temporal datasets to improve the accuracy of VTs classification in a heterogeneous landscape in Central Zagros, Iran. To do so, first the Normalized Difference Vegetation Index (NDVI) temporal profile of each VT was identified in the study area for the period of 2018, 2019, and 2020. This data revealed strong seasonal phenological patterns and key periods of VTs separation. It led us to select the optimal time series images to be used in the VTs classification. We then compared single-date and multi-temporal datasets of Landsat 8 images within the Google Earth Engine (GEE) platform as the input to the Random Forest classifier for VTs detection. The single-date classification gave a median Overall Kappa (OK) and Overall Accuracy (OA) of 51% and 64%, respectively. Instead, using multi-temporal images led to an overall kappa accuracy of 74% and an overall accuracy of 81%. Thus, the exploitation of multi-temporal datasets favored accurate VTs classification. In addition, the presented results underline that available open access cloud-computing platforms such as the GEE facilitates identifying optimal periods and multitemporal imagery for VTs classification.}, } @article {pmid36081832, year = {2022}, author = {Estévez, J and Salinero-Delgado, M and Berger, K and Pipia, L and Rivera-Caicedo, JP and Wocher, M and Reyes-Muñoz, P and Tagliabue, G and Boschetti, M and Verrelst, J}, title = {Gaussian processes retrieval of crop traits in Google Earth Engine based on Sentinel-2 top-of-atmosphere data.}, journal = {Remote sensing of environment}, volume = {273}, number = {}, pages = {112958}, pmid = {36081832}, issn = {0034-4257}, support = {755617/ERC_/European Research Council/International ; }, abstract = {The unprecedented availability of optical satellite data in cloud-based computing platforms, such as Google Earth Engine (GEE), opens new possibilities to develop crop trait retrieval models from the local to the planetary scale. Hybrid retrieval models are of interest to run in these platforms as they combine the advantages of physically- based radiative transfer models (RTM) with the flexibility of machine learning regression algorithms. Previous research with GEE primarily relied on processing bottom-of-atmosphere (BOA) reflectance data, which requires atmospheric correction. In the present study, we implemented hybrid models directly into GEE for processing Sentinel-2 (S2) Level-1C (L1C) top-of-atmosphere (TOA) reflectance data into crop traits. 
To achieve this, a training dataset was generated using the leaf-canopy RTM PROSAIL in combination with the atmospheric model 6SV. Gaussian process regression (GPR) retrieval models were then established for eight essential crop traits namely leaf chlorophyll content, leaf water content, leaf dry matter content, fractional vegetation cover, leaf area index (LAI), and upscaled leaf variables (i.e., canopy chlorophyll content, canopy water content and canopy dry matter content). An important pre-requisite for implementation into GEE is that the models are sufficiently light in order to facilitate efficient and fast processing. Successful reduction of the training dataset by 78% was achieved using the active learning technique Euclidean distance-based diversity (EBD). With the EBD-GPR models, highly accurate validation results of LAI and upscaled leaf variables were obtained against in situ field data from the validation study site Munich-North-Isar (MNI), with normalized root mean square errors (NRMSE) from 6% to 13%. Using an independent validation dataset of similar crop types (Italian Grosseto test site), the retrieval models showed moderate to good performances for canopy-level variables, with NRMSE ranging from 14% to 50%, but failed for the leaf-level estimates. Obtained maps over the MNI site were further compared against Sentinel-2 Level 2 Prototype Processor (SL2P) vegetation estimates generated from the ESA Sentinels' Application Platform (SNAP) Biophysical Processor, proving high consistency of both retrievals (R [2] from 0.80 to 0.94). Finally, thanks to the seamless GEE processing capability, the TOA-based mapping was applied over the entirety of Germany at 20 m spatial resolution including information about prediction uncertainty. The obtained maps provided confidence of the developed EBD-GPR retrieval models for integration in the GEE framework and national scale mapping from S2-L1C imagery. In summary, the proposed retrieval workflow demonstrates the possibility of routine processing of S2 TOA data into crop traits maps at any place on Earth as required for operational agricultural applications.}, } @article {pmid36081813, year = {2021}, author = {Salinero-Delgado, M and Estévez, J and Pipia, L and Belda, S and Berger, K and Gómez, VP and Verrelst, J}, title = {Monitoring Cropland Phenology on Google Earth Engine Using Gaussian Process Regression.}, journal = {Remote sensing}, volume = {14}, number = {1}, pages = {146}, pmid = {36081813}, issn = {2072-4292}, support = {755617/ERC_/European Research Council/International ; }, abstract = {Monitoring cropland phenology from optical satellite data remains a challenging task due to the influence of clouds and atmospheric artifacts. Therefore, measures need to be taken to overcome these challenges and gain better knowledge of crop dynamics. The arrival of cloud computing platforms such as Google Earth Engine (GEE) has enabled us to propose a Sentinel-2 (S2) phenology end-to-end processing chain. To achieve this, the following pipeline was implemented: (1) the building of hybrid Gaussian Process Regression (GPR) retrieval models of crop traits optimized with active learning, (2) implementation of these models on GEE (3) generation of spatiotemporally continuous maps and time series of these crop traits with the use of gap-filling through GPR fitting, and finally, (4) calculation of land surface phenology (LSP) metrics such as the start of season (SOS) or end of season (EOS). 
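The Euclidean-distance-based diversity (EBD) reduction credited above with shrinking the PROSAIL training set by 78% is, in generic form, a greedy farthest-point selection. The sketch below is that generic form under stated assumptions (random stand-in spectra, a random seed point), not the authors' exact procedure.

```python
# Greedy Euclidean-distance-based diversity (EBD) subset selection.
import numpy as np

def ebd_select(X, n_keep, seed=0):
    """Keep n_keep samples that are mutually far apart in feature space."""
    rng = np.random.default_rng(seed)
    selected = [int(rng.integers(len(X)))]          # random first sample
    d = np.linalg.norm(X - X[selected[0]], axis=1)  # distance to selected set
    while len(selected) < n_keep:
        nxt = int(d.argmax())                       # most distant candidate
        selected.append(nxt)
        d = np.minimum(d, np.linalg.norm(X - X[nxt], axis=1))
    return np.array(selected)

spectra = np.random.default_rng(1).random((2000, 13))  # stand-in for RTM output
keep = ebd_select(spectra, n_keep=440)   # keep ~22%, i.e., a ~78% reduction
print(keep.shape)
```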
Overall, from good to high performance was achieved, in particular for the estimation of canopy-level traits such as leaf area index (LAI) and canopy chlorophyll content, with normalized root mean square errors (NRMSE) of 9% and 10%, respectively. By means of the GPR gap-filling time series of S2, entire tiles were reconstructed, and resulting maps were demonstrated over an agricultural area in Castile and Leon, Spain, where crop calendar data were available to assess the validity of LSP metrics derived from crop traits. In addition, phenology derived from the normalized difference vegetation index (NDVI) was used as reference. NDVI not only proved to be a robust indicator for the calculation of LSP metrics, but also served to demonstrate the good phenology quality of the quantitative trait products. Thanks to the GEE framework, the proposed workflow can be realized anywhere in the world and for any time window, thus representing a shift in the satellite data processing paradigm. We anticipate that the produced LSP metrics can provide meaningful insights into crop seasonal patterns in a changing environment that demands adaptive agricultural production.}, } @article {pmid36081177, year = {2022}, author = {Kum, S and Oh, S and Yeom, J and Moon, J}, title = {Optimization of Edge Resources for Deep Learning Application with Batch and Model Management.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {17}, pages = {}, pmid = {36081177}, issn = {1424-8220}, support = {2021-0-01578//Institute for Information and Communications Technology Promotion/ ; }, mesh = {*Deep Learning ; Workload ; }, abstract = {As deep learning technology paves its way, real-world applications that make use of it become popular these days. Edge computing architecture is one of the service architectures to realize the deep learning based service, which makes use of the resources near the data source or client. In Edge computing architecture it becomes important to manage resource usage, and there is research on optimization of deep learning, such as pruning or binarization, which makes deep learning models more lightweight, along with the research for the efficient distribution of workloads on cloud or edge resources. Those are to reduce the workload on edge resources. In this paper, a usage optimization method with batch and model management is proposed. The proposed method is to increase the utilization of GPU resource by modifying the batch size of the input of an inference application. To this end, the inference pipelines are identified to see how the different kinds of resources are used, and then the effect of batch inference on GPU is measured. The proposed method consists of a few modules, including a tool for batch size management which is able to change a batch size with respect to the available resources, and another one for model management which supports on-the-fly update of a model. The proposed methods are implemented on a real-time video analysis application and deployed in the Kubernetes cluster as a Docker container. 
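Threshold-based start/end of season, one common way to compute the SOS and EOS metrics named in the phenology entry above, takes a gap-filled NDVI (or trait) time series and finds the crossings of a fraction of the seasonal amplitude. The 50% threshold and the synthetic series below are assumptions, not the paper's exact definition.

```python
# SOS/EOS from a gap-filled NDVI time series via an amplitude threshold.
import numpy as np

days = np.arange(0, 365, 5)
ndvi = 0.2 + 0.5 * np.exp(-(((days - 180) / 60.0) ** 2))   # synthetic season

amplitude = ndvi.max() - ndvi.min()
threshold = ndvi.min() + 0.5 * amplitude          # 50%-amplitude convention
above = ndvi >= threshold
sos = days[np.argmax(above)]                      # first day above threshold
eos = days[len(above) - 1 - np.argmax(above[::-1])]   # last day above threshold
print(f"SOS: day {sos}, EOS: day {eos}")
```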
The results show that the proposed method can optimize the usage of edge resources for real-time video analysis deep learning applications.}, } @article {pmid36081143, year = {2022}, author = {Strigaro, D and Cannata, M and Lepori, F and Capelli, C and Lami, A and Manca, D and Seno, S}, title = {Open and Cost-Effective Digital Ecosystem for Lake Water Quality Monitoring.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {17}, pages = {}, pmid = {36081143}, issn = {1424-8220}, support = {523544//European Commission/ ; 523544//Repubblica e Cantone Ticino/ ; }, mesh = {Cost-Benefit Analysis ; *Ecosystem ; *Lakes ; Software ; Water Quality ; }, abstract = {In some sectors of water resources management, the digital revolution is slowed by blocking factors such as costs, lack of digital expertise, and resistance to change. In addition, in the era of Big Data, many sources of information are available in this field, but they are often not fully integrated. The adoption of different proprietary solutions to sense, collect, and manage data is one of the main problems hampering the availability of a fully integrated system. In this context, the aim of the project is to verify whether a fully open, cost-effective and replicable digital ecosystem for lake monitoring can fill this gap and support the digitalization process using cloud-based technology and an Automatic High-Frequency Monitoring System (AHFM) built using open hardware and software components. Once developed, the system is tested and validated in a real case scenario by integrating the historical databases and by checking the performance of the AHFM system. The solution applied the edge computing paradigm, moving some computational work from the server to the edge and fully exploiting the potential offered by low-power devices.}, } @article {pmid36081126, year = {2022}, author = {Azamuddin, WMH and Aman, AHM and Hassan, R and Mansor, N}, title = {Comparison of Named Data Networking Mobility Methodology in a Merged Cloud Internet of Things and Artificial Intelligence Environment.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {17}, pages = {}, pmid = {36081126}, issn = {1424-8220}, support = {FRGS/1/2019/ICT03/UKM/02/1//National University of Malaysia/ ; }, mesh = {Artificial Intelligence ; Cloud Computing ; *Internet of Things ; Technology ; }, abstract = {In-network caching has evolved into a new paradigm, paving the way for the creation of Named Data Networking (NDN). Rather than simply being typical Internet technology, NDN serves a range of functions, with a focus on consumer-driven network architecture. The NDN design has been proposed as a method for replacing Internet Protocol (IP) addresses with identified content. This study adds to current research on NDN, artificial intelligence (AI), cloud computing, and the Internet of Things (IoT). The core contribution of this paper is the merging of cloud IoT (C-IoT) and NDN-AI-IoT. To be precise, this study provides possible methodological and parameter explanations of the technologies via three methods: KITE, a producer mobility support scheme (PMSS), and hybrid network mobility (hybrid NeMO). KITE uses the indirection method to transmit content using simple NDN communication; the PMSS improves producer operation by reducing handover latency; and hybrid NeMO provides a binding information table to replace the base function of forwarding information.
This study also develops mathematical equations for signaling cost and handover latency for each methodology, based on the mobility scenario, and uses the network simulator ndnSIM NS-3 to highlight producer mobility operation. The results show that the efficiency of signaling cost for hybrid NeMO is approximately 4% better than that of KITE and the PMSS, while the handover latency for hybrid NeMO is 46% lower than that of KITE and approximately 60% lower than that of the PMSS.}, } @article {pmid36080827, year = {2022}, author = {McRae, MP and Rajsri, KS and Alcorn, TM and McDevitt, JT}, title = {Smart Diagnostics: Combining Artificial Intelligence and In Vitro Diagnostics.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {17}, pages = {}, pmid = {36080827}, issn = {1424-8220}, support = {R01 DE024392/DE/NIDCR NIH HHS/United States ; R01DE024392/NH/NIH HHS/United States ; 5 U01 DE017793-2/NH/NIH HHS/United States ; N/A//Renaissance Health Services Corporation/ ; R01DE031319-01/NH/NIH HHS/United States ; R01 DE031319/DE/NIDCR NIH HHS/United States ; N/A//Delta Dental of Michigan/ ; U01 DE017793/DE/NIDCR NIH HHS/United States ; N/A//Cancer Prevention and Research Institute of Texas/ ; U54 EB027690/EB/NIBIB NIH HHS/United States ; 3 U01 DE017793-02S1/NH/NIH HHS/United States ; 1RC2DE020785-01/NH/NIH HHS/United States ; R44 DE025798/DE/NIDCR NIH HHS/United States ; 5U54EB027690-04/NH/NIH HHS/United States ; RC2 DE020785/DE/NIDCR NIH HHS/United States ; 4R44DE 025798-02/NH/NIH HHS/United States ; }, mesh = {Artificial Intelligence ; *Biosensing Techniques ; *COVID-19/diagnosis ; COVID-19 Testing ; Humans ; Microfluidics ; Point-of-Care Systems ; }, abstract = {We are beginning a new era of Smart Diagnostics: integrated biosensors powered by recent innovations in embedded electronics, cloud computing, and artificial intelligence (AI). Universal and AI-based in vitro diagnostics (IVDs) have the potential to exponentially improve healthcare decision making in the coming years. This perspective covers current trends and challenges in translating Smart Diagnostics. We identify essential elements of Smart Diagnostics platforms through the lens of a clinically validated platform for digitizing biology and its ability to learn disease signatures. This platform for biochemical analyses uses a compact instrument to perform multiclass and multiplex measurements using fully integrated microfluidic cartridges compatible with the point of care. Image analysis digitizes biology by transforming fluorescence signals into inputs for learning disease/health signatures. The result is an intuitive Score reported to the patients and/or providers. This AI-linked universal diagnostic system has been validated through a series of large clinical studies and used to identify signatures for early disease detection and disease severity in several applications, including cardiovascular diseases, COVID-19, and oral cancer. The utility of this Smart Diagnostics platform may extend to multiple cell-based oncology tests via cross-reactive biomarkers spanning oral, colorectal, lung, bladder, esophageal, and cervical cancers, and is well-positioned to improve patient care, management, and outcomes through deployment of this resilient and scalable technology.
Lastly, we provide a future perspective on the direction and trajectory of Smart Diagnostics and the transformative effects they will have on health care.}, } @article {pmid36079676, year = {2022}, author = {Shi, F and Zhou, B and Zhou, H and Zhang, H and Li, H and Li, R and Guo, Z and Gao, X}, title = {Spatial Autocorrelation Analysis of Land Use and Ecosystem Service Value in the Huangshui River Basin at the Grid Scale.}, journal = {Plants (Basel, Switzerland)}, volume = {11}, number = {17}, pages = {}, pmid = {36079676}, issn = {2223-7747}, support = {2019QZKK0105//the Second Qinghai-Tibet Plateau Scientific Expedition and Research Program/ ; U21A2021//the National Natural Science Foundation of China/ ; 2021-ZJ-913//the Natural Science Foundation of Qinghai Province of China/ ; }, abstract = {The Huangshui River Basin is one of the most densely populated areas on the Qinghai-Tibet Plateau and is characterized by a high level of human activity. The contradiction between ecological protection and socioeconomic development has become increasingly prominent; determining how to achieve the balanced and coordinated development of the Huangshui River Basin is an important task. Thus, this study used the Google Earth Engine (GEE) cloud-computing platform and Sentinel-1/2 data, supplemented with an ALOS digital elevation model (ALOS DEM) and field survey data, and combined a remote sensing classification method, grid method, and ecosystem service value (ESV) evaluation method to study the spatial correlation and interaction between land use (LU) and ESV in the Huangshui River Basin. The following results were obtained: (1) on the GEE platform, Sentinel-1/2 active and passive remote sensing data, combined with the gradient tree-boosting algorithm, can efficiently produce highly accurate LU data with a spatial resolution of 10 m in the Huangshui River Basin; the overall accuracy (OA) reached 88%. (2) The total ESV in the Huangshui River Basin in 2020 was CNY 33.18 billion (USD 4867.2 million), of which woodland and grassland were the main contributors to ESV. In the Huangshui River Basin, the LU type, LU degree, and ESV have significant positive spatial correlations, with urban and agricultural areas showing an H-H agglomeration in terms of LU degree, and woodlands, grasslands, reservoirs, and wetlands showing an H-H agglomeration in terms of ESV. (3) There is a significant negative spatial correlation between the LU degree and ESV in the Huangshui River Basin, indicating that the enhancement of the LU degree in the basin could have a negative spatial spillover effect on the ESV of surrounding areas.
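The spatial autocorrelation statistics reported in the Shi et al. entry above are typically computed as global Moran's I over gridded zone values. A small self-contained example of that computation (the four-zone values and chain adjacency below are toys, not the basin data):

    import numpy as np

    def morans_I(values, W):
        # Global Moran's I for zone values and a binary spatial weights
        # matrix W (w_ij = 1 when zones i and j are neighbours).
        z = values - values.mean()
        n = len(values)
        return (n / W.sum()) * (z @ W @ z) / (z @ z)

    vals = np.array([10.0, 9.0, 2.0, 1.0])     # high values cluster on one side
    W = np.array([[0, 1, 0, 0],                # four zones along a line
                  [1, 0, 1, 0],
                  [0, 1, 0, 1],
                  [0, 0, 1, 0]], dtype=float)
    print(morans_I(vals, W))                   # about 0.39: positive autocorrelation

Values near +1 indicate clustering (the H-H agglomerations above), values near -1 indicate dispersion, and values near 0 indicate spatial randomness.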
Thus, green development should be the future direction of progress in the Huangshui River Basin; i.e., while maintaining and expanding the land for ecological protection and restoration, the LU structure should be actively adjusted to ensure ecological security and coordinated, sustainable socioeconomic development in the Basin.}, } @article {pmid36078329, year = {2022}, author = {Feng, H and Wang, F and Song, G and Liu, L}, title = {Digital Transformation on Enterprise Green Innovation: Effect and Transmission Mechanism.}, journal = {International journal of environmental research and public health}, volume = {19}, number = {17}, pages = {}, pmid = {36078329}, issn = {1660-4601}, mesh = {China ; Financing, Government ; *Government ; *Sustainable Development ; }, abstract = {With the development of blockchain, big data, cloud computing and other new technologies, how to achieve innovative development and green sustainable development in digital transformation has become one of the key issues for enterprises to obtain and maintain core competitiveness. However, little of the literature has paid attention to the impact of digital transformation on enterprise green innovation. Using data on Chinese A-share listed companies from 2010 to 2020, this paper empirically analyzes the impact of enterprise digital transformation on green innovation and its transmission mechanism by constructing double fixed-effect models. The results show that digital transformation has remarkably promoted the green innovation of enterprises. R&D investment, government subsidies, and income tax burden have played a transmission role between digital transformation and enterprise green innovation. Furthermore, digital transformation can significantly promote high-quality green innovation and plays a more significant role in promoting green innovation in high-tech enterprises and state-owned enterprises. A robustness test is carried out by using lagged data and changing the measurement methods of the dependent and independent variables, and the research conclusions remain valid. Based on resource-based theory and dynamic capability theory, this paper reveals the impact path of digital transformation on enterprise green innovation, further expanding the research field of digital transformation and enriching the research on the influencing factors of enterprise green innovation. This paper provides policy suggestions for the government to improve enterprise green innovation by increasing government subsidies and providing tax incentives, and also provides a reference for digital transformation enterprises to accelerate green innovation by increasing R&D investment, obtaining government subsidies, and acquiring tax policy support.}, } @article {pmid36075919, year = {2022}, author = {Sheffield, NC and Bonazzi, VR and Bourne, PE and Burdett, T and Clark, T and Grossman, RL and Spjuth, O and Yates, AD}, title = {From biomedical cloud platforms to microservices: next steps in FAIR data and analysis.}, journal = {Scientific data}, volume = {9}, number = {1}, pages = {553}, pmid = {36075919}, issn = {2052-4463}, support = {/WT_/Wellcome Trust/United Kingdom ; 201535/Z/16/Z/WT_/Wellcome Trust/United Kingdom ; R35 GM128636/GM/NIGMS NIH HHS/United States ; R35GM128636//U.S. Department of Health & Human Services | NIH | National Institute of General Medical Sciences (NIGMS)/ ; }, abstract = {The biomedical research community is investing heavily in biomedical cloud platforms.
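The double fixed-effect design in the Feng et al. entry above corresponds to a two-way (firm and year) fixed-effects regression. An illustrative dummy-variable (LSDV) sketch on synthetic panel data (all variable names, sizes, and coefficients here are invented, not the paper's dataset or specification):

    import numpy as np
    import pandas as pd
    import statsmodels.formula.api as smf

    rng = np.random.default_rng(1)
    firms, years = 50, 11                       # synthetic firm-year panel
    df = pd.DataFrame({
        "firm": np.repeat(np.arange(firms), years),
        "year": np.tile(np.arange(2010, 2021), firms),
    })
    df["digital"] = rng.random(len(df))                      # digitalization proxy
    df["green_innov"] = 0.8 * df["digital"] + rng.normal(0, 1, len(df))

    # Firm and year dummies absorb entity- and time-specific effects
    fit = smf.ols("green_innov ~ digital + C(firm) + C(year)", data=df).fit()
    print(fit.params["digital"])                # estimated effect of digitalization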
Cloud computing holds great promise for addressing challenges with big data and ensuring reproducibility in biology. However, despite their advantages, cloud platforms in and of themselves do not automatically support FAIRness. The global push to develop biomedical cloud platforms has led to new challenges, including platform lock-in, difficulty integrating across platforms, and duplicated effort for both users and developers. Here, we argue that these difficulties are systemic and emerge from incentives that encourage development effort on self-sufficient platforms and data repositories instead of interoperable microservices. We argue that many of these issues would be alleviated by prioritizing microservices and access to modular data in smaller chunks or summarized form. We propose that emphasizing modularity and interoperability would lead to a more powerful Unix-like ecosystem of web services for biomedical analysis and data retrieval. We challenge funders, developers, and researchers to support a vision to improve interoperability through microservices as the next generation of cloud-based bioinformatics.}, } @article {pmid36072746, year = {2022}, author = {Cheng, Q and Dang, CN}, title = {Using GIS Remote Sensing Image Data for Wetland Monitoring and Environmental Simulation.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {7886358}, pmid = {36072746}, issn = {1687-5273}, mesh = {Artificial Intelligence ; Environmental Monitoring/methods ; Geographic Information Systems ; *Remote Sensing Technology ; *Wetlands ; }, abstract = {Through a comprehensive theoretical basis and actual test analysis of the application system design and functional efficiency of the cloud platform, this paper puts forward an artificial intelligence environmental data monitoring and wetland environmental simulation method based on GIS remote sensing images. First, the basic storage and computing functions have been enhanced at the physical layer. Second, the middleware layer is more flexible in its use of management methods and strategies, and many strategies and methods can be used in combination. Finally, based on this, the application system design framework is more convenient and faster, so that developers can focus on business logic, and the strategic advantages of certain functions are very obvious. Object-oriented classification and visual interpretation are applied together to UAV image data and satellite remote sensing images of the typical recovery and treatment areas of the wetland from 2016 to 2020 to extract wetland information, and GIS software is used for dynamic calculation.
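The dynamic calculation of land-type conversion described here, together with the transition-matrix method summarized in the next sentence, amounts to cross-tabulating two classified rasters. A minimal numpy sketch with toy 2016/2020 label grids (the class codes and values are invented for illustration):

    import numpy as np

    def transition_matrix(before, after, n_classes):
        # Entry (i, j) counts pixels that changed from class i (earlier date)
        # to class j (later date); the diagonal holds unchanged pixels.
        idx = before.ravel() * n_classes + after.ravel()
        return np.bincount(idx, minlength=n_classes * n_classes).reshape(
            n_classes, n_classes)

    # toy label rasters (0 = water, 1 = marsh, 2 = bare)
    lc2016 = np.array([[0, 0, 1], [1, 2, 2], [2, 2, 1]])
    lc2020 = np.array([[0, 1, 1], [1, 1, 2], [2, 0, 1]])
    print(transition_matrix(lc2016, lc2020, n_classes=3))

Multiplying the counts by the pixel area gives per-class conversion areas, the usual quantitative measure of treatment effect between two survey periods.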
Using the wetland transition matrix method, the distribution maps of the characteristic types of the survey areas in the four periods and the conversion status of the characteristic types at each stage were obtained, and the effect of wetland treatment was quantitatively studied.}, } @article {pmid36072717, year = {2022}, author = {Aggarwal, A and Kumar, S and Bhatt, A and Shah, MA}, title = {Solving User Priority in Cloud Computing Using Enhanced Optimization Algorithm in Workflow Scheduling.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {7855532}, pmid = {36072717}, issn = {1687-5273}, mesh = {*Algorithms ; *Cloud Computing ; Workflow ; }, abstract = {Cloud computing is a procedure for the storage and retrieval of data or computer services over the Internet that allows all its users to remotely access data centers. Cloud computing provides all required services to the users, but every platform has its share of pros and cons, and a major problem in the cloud is task scheduling or workflow scheduling. Multiple factors pose challenges for scheduling in cloud computing, namely the heterogeneity of resources, tasks, and user priority. User priority has been the most challenging problem during the last decade as the number of users increases worldwide. This issue has been addressed by an advanced encryption standard (AES) algorithm, which decreases the response time and execution delay of user requests. There are multifarious tasks, for instance deploying data on the cloud, that will be executed on a first-come-first-served (FCFS) basis and not on a payment basis, which provides ease to the users. These investigated techniques are 30.21%, 25.20%, 25.30%, 30.25%, 24.26%, and 36.98% improved in comparison with the traditional FFOA, DE, ABC, PSO, GA, and ETC, respectively. Moreover, during iteration number 5, this approach is 15.20%, 20.22%, 30.56%, 26.30%, and 36.23% improved over the traditional techniques FFOA, DE, ABC, PSO, GA, and ETC, respectively. This investigated method is more efficient and applicable in certain arenas where user priority is the primary concern and can offer all the required services to the users without any interruption.}, } @article {pmid36065132, year = {2022}, author = {Feser, M and König, P and Fiebig, A and Arend, D and Lange, M and Scholz, U}, title = {On the way to plant data commons - a genotyping use case.}, journal = {Journal of integrative bioinformatics}, volume = {19}, number = {4}, pages = {}, pmid = {36065132}, issn = {1613-4516}, mesh = {*Ecosystem ; Genotype ; *Computational Biology ; Software ; }, abstract = {Over the last years it has been observed that the progress in data collection in life science has created increasing demand and opportunities for advanced bioinformatics. This includes data management as well as the individual data analysis and often covers the entire data life cycle. A variety of tools have been developed to store, share, or reuse the data produced in the different domains such as genotyping. Especially imputation, as a subfield of genotyping, requires good Research Data Management (RDM) strategies to enable use and re-use of genotypic data. To aim for sustainable software, it is necessary to develop tools and surrounding ecosystems, which are reusable and maintainable. Reusability in the context of streamlined tools can, e.g.,
be achieved by standardizing the input and output of the different tools and adapting to open and broadly used file formats. By using such established file formats, the tools can also be connected with others, improving the overall interoperability of the software. Finally, it is important to build strong communities that maintain the tools by developing and contributing new features and maintenance updates. In this article, concepts for this will be presented for an imputation service.}, } @article {pmid36062125, year = {2022}, author = {Guan, J and Xu, H and Wang, Y and Ma, Y and Wang, Y and Gao, R and Yu, K}, title = {Digital Economy and Health: A Case Study of a Leading Enterprise's Value Mining Mode in the Global Big Health Market.}, journal = {Frontiers in public health}, volume = {10}, number = {}, pages = {904186}, pmid = {36062125}, issn = {2296-2565}, mesh = {Aged ; Bayes Theorem ; *COVID-19/epidemiology ; *Ecosystem ; Humans ; Industry ; }, abstract = {Coronavirus disease 2019 (COVID-19) swept across the world and posed a serious threat to human health. Health and elderly care enterprises are committed to continuously improving people's health. With the rapid development of the digital economy, many enterprises have established digital product-service ecosystems after combining "Internet +," big data, cloud computing, and the big health industry. This paper uses the case study method to analyze the overseas market value mining mode of health and elderly care enterprises through in-depth research on leading health and elderly care enterprises. This study explores the value mining mode of the leading enterprise's global big health market using a cluster analysis and Bayesian model with the support of data on geographical characteristics, users' sleep habits, and national big health. This paper theoretically summarizes the successful cases of health and elderly care enterprises through digital transformation, which provides a useful reference for the intelligent transformation of the health and elderly care industry.}, } @article {pmid36062066, year = {2022}, author = {Rufin, P and Bey, A and Picoli, M and Meyfroidt, P}, title = {Large-area mapping of active cropland and short-term fallows in smallholder landscapes using PlanetScope data.}, journal = {International journal of applied earth observation and geoinformation : ITC journal}, volume = {112}, number = {}, pages = {102937}, pmid = {36062066}, issn = {1569-8432}, abstract = {Cropland mapping in smallholder landscapes is challenged by complex and fragmented landscapes, labor-intensive and unmechanized land management causing high within-field variability, rapid dynamics in shifting cultivation systems, and substantial proportions of short-term fallows. To overcome these challenges, we here present a large-area mapping framework to identify active cropland and short-term fallows in smallholder landscapes for the 2020/2021 growing season at 4.77 m spatial resolution. Our study focuses on Northern Mozambique, an area comprising 381,698 km[2]. The approach is based on Google Earth Engine and time series of PlanetScope mosaics made openly available through Norway's International Climate and Forest Initiative (NICFI) data program. We conducted multi-temporal coregistration of the PlanetScope data using seasonal Sentinel-2 base images and derived consistent and gap-free seasonal time series metrics to classify active cropland and short-term fallows.
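The classification workflow in the Rufin et al. entry pairs these time-series metrics with an iterative active learning loop, described in the continuation of this entry below. A hedged sketch of such an uncertainty-sampling loop driven by Random Forest class probabilities (the pool size, batch size, and labels are synthetic stand-ins, not the authors' data):

    import numpy as np
    from sklearn.ensemble import RandomForestClassifier

    rng = np.random.default_rng(0)
    X_pool = rng.random((2000, 6))                             # metrics per pixel
    y_pool = (X_pool[:, 0] + X_pool[:, 3] > 1.0).astype(int)   # toy cropland labels

    labeled = list(rng.choice(len(X_pool), size=50, replace=False))
    for _ in range(5):                                   # five labeling rounds
        rf = RandomForestClassifier(n_estimators=200, random_state=0)
        rf.fit(X_pool[labeled], y_pool[labeled])
        proba = rf.predict_proba(X_pool)
        margin = np.abs(proba[:, 0] - proba[:, 1])       # small margin = uncertain
        already = set(labeled)
        new = [int(i) for i in np.argsort(margin) if int(i) not in already][:25]
        labeled.extend(new)          # "label" the most uncertain pixels next

Concentrating labeling effort on low-margin samples is what lets rare classes and uncertain regions be represented without exhaustively labeling the map.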
An iterative active learning framework based on Random Forest class probabilities was used for training rare classes and uncertain regions. The map was accurate (area-adjusted overall accuracy 88.6% ± 1.5%), with the main error type being the commission of active cropland. Error-adjusted area estimates of active cropland extent (61,799.5 km[2] ± 4,252.5 km[2]) revealed that existing global and regional land cover products tend to under-, or over-estimate active cropland extent, respectively. Short-term fallows occupied 28.9% of the cropland in our reference sample (13% of the mapped cropland), with consolidated agricultural regions showing the highest shares of short-term fallows. Our approach relies on openly available PlanetScope data and cloud-based processing in Google Earth Engine, which minimizes financial constraints and maximizes replicability of the methods. All code and maps were made available for further use.}, } @article {pmid36061493, year = {2022}, author = {Zhou, D}, title = {Mobility and interlinkage: the transformation and new approaches for anthropological research.}, journal = {International journal of anthropology and ethnology}, volume = {6}, number = {1}, pages = {13}, doi = {10.1186/s41257-022-00072-x}, pmid = {36061493}, issn = {2366-1003}, abstract = {Mobility and interlinkage have become the most important characteristics of our time. The mobility and interlinkage of people, material and information constitute the way and rules of the operation of today's world. Internet links, cloud computing, complex databases and human computation have changed the way people relate to the world, and thus the anthropology for understanding and interpreting human cultures has changed correspondingly. Cultures in the state of mobility and interlinkage, such as spatial changes, the evolution of interpersonal relationships and the new cultural order, have become a new subject.}, } @article {pmid36060618, year = {2023}, author = {Katal, A and Dahiya, S and Choudhury, T}, title = {Energy efficiency in cloud computing data centers: a survey on software technologies.}, journal = {Cluster computing}, volume = {26}, number = {3}, pages = {1845-1875}, pmid = {36060618}, issn = {1386-7857}, abstract = {Cloud computing is a commercial and economic paradigm that has gained traction since 2006 and is presently the most significant technology in the IT sector. From the notion of cloud computing to its energy efficiency, the cloud has been the subject of much discussion. The energy consumption of data centres alone will rise from 200 TWh in 2016 to 2967 TWh in 2030. The data centres require a lot of power to provide services, which increases CO2 emissions. In this survey paper, software-based technologies that can be used for building green data centers, including power management at the individual software level, are discussed. The paper discusses energy efficiency in containers and problem-solving approaches used for reducing power consumption in data centers. Further, the paper also gives details about the impact of data centers on the environment, including e-waste and the various standards adopted by different countries for rating data centers. This article goes beyond just demonstrating new green cloud computing possibilities. Instead, it focuses the attention and resources of academia and society on a critical issue: long-term technological advancement.
The article covers the new technologies that can be applied at the individual software level, including techniques at the virtualization, operating system, and application levels. It defines different measures at each level to reduce energy consumption, which adds clear value to the current environmental problem of pollution reduction. This article also addresses the difficulties, concerns, and needs that cloud data centres and cloud organisations must grasp, as well as some of the factors and case studies that influence green cloud usage.}, } @article {pmid36059591, year = {2022}, author = {Moqurrab, SA and Tariq, N and Anjum, A and Asheralieva, A and Malik, SUR and Malik, H and Pervaiz, H and Gill, SS}, title = {A Deep Learning-Based Privacy-Preserving Model for Smart Healthcare in Internet of Medical Things Using Fog Computing.}, journal = {Wireless personal communications}, volume = {126}, number = {3}, pages = {2379-2401}, pmid = {36059591}, issn = {0929-6212}, abstract = {With the emergence of COVID-19, smart healthcare, the Internet of Medical Things, and big data-driven medical applications have become even more important. The biomedical data produced is highly confidential and private. Unfortunately, conventional health systems cannot support such a colossal amount of biomedical data. Hence, data is typically stored and shared through the cloud. The shared data is then used for different purposes, such as research and discovery of unprecedented facts. Typically, biomedical data appear in textual form (e.g., test reports, prescriptions, and diagnoses). Unfortunately, such data is prone to several security threats and attacks, for example, privacy and confidentiality breaches. Although significant progress has been made on securing biomedical data, most existing approaches yield long delays and cannot accommodate real-time responses. This paper proposes a novel fog-enabled privacy-preserving model called δr sanitizer, which uses deep learning to improve the healthcare system. The proposed model is based on a Convolutional Neural Network with Bidirectional-LSTM and effectively performs Medical Entity Recognition. The experimental results show that δr sanitizer outperforms the state-of-the-art models with 91.14% recall, 92.63% precision, and a 92% F1-score. The sanitization model shows 28.77% improved utility preservation as compared to the state-of-the-art.}, } @article {pmid36059392, year = {2022}, author = {Srivastava, DK and Tiwari, PK and Srivastava, M and Dawadi, BR}, title = {An Energy-Efficient Strategy and Secure VM Placement Algorithm in Cloud Computing.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {5324202}, pmid = {36059392}, issn = {1687-5273}, abstract = {One of the important and challenging tasks in cloud computing is to realize the usefulness of the cloud by implementing several specifications for our needs, to meet the present growing demands, to minimize energy consumption as much as possible, and to ensure proper utilization of computing resources. An effective mapping scheme that maps virtual machines (VMs) to physical machines (PMs), also known as virtual machine (VM) placement, needs to be derived and implemented. The tremendous diversity of computing resources, tasks, and virtualization processes in the cloud makes the consolidation method more complex, tedious, and problematic.
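The VM placement problem introduced in the Srivastava et al. entry above is commonly illustrated with a greedy bin-packing pass. The sketch below uses best-fit decreasing on a single CPU dimension; the capacities and demands are invented, and this is not the authors' algorithm, which also targets security and makespan:

    def place_vms(vm_demands, host_capacity):
        # Best-fit decreasing: biggest VM first, tightest host that still fits;
        # fewer powered-on PMs means less idle energy draw.
        hosts = []                                   # residual capacity per PM
        assignment = {}
        for vm, demand in sorted(vm_demands.items(), key=lambda kv: -kv[1]):
            fits = [i for i, free in enumerate(hosts) if free >= demand]
            if fits:
                best = min(fits, key=lambda i: hosts[i])   # tightest fit
            else:
                hosts.append(host_capacity)                # power on a new PM
                best = len(hosts) - 1
            hosts[best] -= demand
            assignment[vm] = best
        return assignment, len(hosts)

    vms = {"vm1": 4, "vm2": 2, "vm3": 6, "vm4": 3}         # CPU units demanded
    print(place_vms(vms, host_capacity=8))                 # packs onto 2 PMs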
An algorithm for reducing energy use and improving resource allocation is proposed in this article. The algorithm was developed with the help of a Cloud System Model, which enables mapping between VMs and PMs and among the tasks of VMs. The methodology also lowers the number of PMs in an active state and optimizes the total time taken to process a set of tasks (also known as the makespan time). Using the CloudSim Simulator tool, we evaluated and assessed the energy consumption and makespan time. The results are compiled and then compared graphically with other existing energy-efficient VM placement algorithms.}, } @article {pmid36052034, year = {2022}, author = {Gan, B and Zhang, C}, title = {An Improved Model of Product Classification Feature Extraction and Recognition Based on Intelligent Image Recognition.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {2926669}, pmid = {36052034}, issn = {1687-5273}, mesh = {*Algorithms ; Cloud Computing ; Commerce ; Humans ; *Software ; }, abstract = {With the development of the new generation of technological revolution, the manufacturing industry has entered the era of intelligent manufacturing, and people have higher and higher requirements for the technology, industry, and application of product manufacturing. At present, some factories have introduced intelligent image recognition technology into the production process in order to meet the needs of customers' personalized customization. However, the current image recognition technology has limited capabilities. When faced with many special customized products or complex types of small-batch products in the market, it is still impossible to perfectly analyze the product requirements and put them into production. Therefore, this paper conducts in-depth research on an improved model of product classification feature extraction and recognition based on intelligent image recognition: 3D modeling of the target product is carried out, and various data of the model are analyzed and recorded to facilitate subsequent work. The tools and the established 3D model are used to simulate the parameters of the product in the real scene, and these are recorded. At the same time, various methods such as image detection and edge analysis are used to maximize the accuracy of the obtained parameters, and various algorithms are used for cross-validation to obtain the correct rate of the obtained data, with a standard of 90% and above. A data platform is built to compare the simulated data with the display data by software and algorithm, checked by cloud computing power, so that the model data can be as close to the parameters of the real product as possible.
Experimental results show that the algorithm has high accuracy and can meet the requirements of different classification scenarios in actual production.}, } @article {pmid36048352, year = {2022}, author = {Jiang, F and Deng, M and Tang, J and Fu, L and Sun, H}, title = {Integrating spaceborne LiDAR and Sentinel-2 images to estimate forest aboveground biomass in Northern China.}, journal = {Carbon balance and management}, volume = {17}, number = {1}, pages = {12}, pmid = {36048352}, issn = {1750-0680}, support = {CX20210852//the Postgraduate Scientific Research Innovation Project of Hunan Province/ ; XLK201986//Scientific Research Fund of Hunan Provincial Forestry Department/ ; 31971578//the project of the National Natural Science Foundation of China/ ; }, abstract = {BACKGROUND: Fast and accurate forest aboveground biomass (AGB) estimation and mapping is the basic work of forest management and ecosystem dynamic investigation, which is of great significance for evaluating forest quality, assessing resources, and managing the carbon cycle. The Ice, Cloud, and Land Elevation Satellite-2 (ICESat-2), as one of the latest launched spaceborne light detection and ranging (LiDAR) sensors, can penetrate the forest canopy and has the potential to obtain accurate forest vertical structure parameters on a large scale. However, the along-track segments of canopy height provided by ICESat-2 cannot be used to obtain comprehensive AGB spatial distribution. To make up for the deficiency of spaceborne LiDAR, the Sentinel-2 images provided by Google Earth Engine (GEE) were used as the medium to integrate with ICESat-2 for continuous AGB mapping in our study. Ensemble learning can summarize the advantages of estimation models and achieve better estimation results. A stacking algorithm consisting of four non-parametric base models, namely the backpropagation (BP) neural network, k-nearest neighbor (kNN), support vector machine (SVM), and random forest (RF), was proposed for AGB modeling and estimation in Saihanba forest farm, northern China.

RESULTS: The results show that stacking achieved the best AGB estimation accuracy among the models, with an R[2] of 0.71 and a root mean square error (RMSE) of 45.67 Mg/ha. Stacking resulted in the lowest estimation error, with decreases in RMSE of 22.6%, 27.7%, 23.4%, and 19.0% compared with the BP, kNN, SVM, and RF models, respectively.
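The stacking design described in this entry (four non-parametric base models combined by a meta-learner trained on out-of-fold predictions) can be reproduced in outline with scikit-learn. In the sketch below, an MLP stands in for the backpropagation neural network and synthetic data replaces the Sentinel-2/ICESat-2 predictor table:

    from sklearn.datasets import make_regression
    from sklearn.ensemble import RandomForestRegressor, StackingRegressor
    from sklearn.linear_model import LinearRegression
    from sklearn.neighbors import KNeighborsRegressor
    from sklearn.neural_network import MLPRegressor
    from sklearn.svm import SVR

    # synthetic stand-in for the spectral + LiDAR predictor table
    X, y = make_regression(n_samples=400, n_features=12, noise=10.0, random_state=0)

    stack = StackingRegressor(
        estimators=[
            ("bp", MLPRegressor(hidden_layer_sizes=(32,), max_iter=2000, random_state=0)),
            ("knn", KNeighborsRegressor(n_neighbors=5)),
            ("svm", SVR(C=10.0)),
            ("rf", RandomForestRegressor(n_estimators=200, random_state=0)),
        ],
        final_estimator=LinearRegression(),   # meta-learner combines base predictions
        cv=5,                                 # out-of-fold predictions avoid leakage
    )
    stack.fit(X, y)
    print(stack.score(X, y))   # R^2 on the training table (use held-out data in practice)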

CONCLUSION: Compared with using Sentinel-2 alone, the estimation errors of all models were significantly reduced after adding the LiDAR variables of ICESat-2 in AGB estimation. The research demonstrated that ICESat-2 has the potential to improve the accuracy of AGB estimation and provides a reference for dynamic forest resources management and monitoring.}, } @article {pmid36048148, year = {2022}, author = {Krissinel, E and Lebedev, AA and Uski, V and Ballard, CB and Keegan, RM and Kovalevskiy, O and Nicholls, RA and Pannu, NS and Skubák, P and Berrisford, J and Fando, M and Lohkamp, B and Wojdyr, M and Simpkin, AJ and Thomas, JMH and Oliver, C and Vonrhein, C and Chojnowski, G and Basle, A and Purkiss, A and Isupov, MN and McNicholas, S and Lowe, E and Triviño, J and Cowtan, K and Agirre, J and Rigden, DJ and Uson, I and Lamzin, V and Tews, I and Bricogne, G and Leslie, AGW and Brown, DG}, title = {CCP4 Cloud for structure determination and project management in macromolecular crystallography.}, journal = {Acta crystallographica. Section D, Structural biology}, volume = {78}, number = {Pt 9}, pages = {1079-1089}, pmid = {36048148}, issn = {2059-7983}, support = {BB/L007037/1/BB_/Biotechnology and Biological Sciences Research Council/United Kingdom ; BB/S007040/1/BB_/Biotechnology and Biological Sciences Research Council/United Kingdom ; BB/S007083/1/BB_/Biotechnology and Biological Sciences Research Council/United Kingdom ; BB/S005099/1/BB_/Biotechnology and Biological Sciences Research Council/United Kingdom ; BB/S007105/1/BB_/Biotechnology and Biological Sciences Research Council/United Kingdom ; BBF020384/1/BB_/Biotechnology and Biological Sciences Research Council/United Kingdom ; MC_UP_A025_1012/MRC_/Medical Research Council/United Kingdom ; MC_U105184325/MRC_/Medical Research Council/United Kingdom ; 349-2013-597//Röntgen-Ångström Cluster/ ; TKI 16219//Nederlandse Wetenschappelijke Organisatie/ ; }, mesh = {*Cloud Computing ; Crystallography, X-Ray ; Macromolecular Substances/chemistry ; *Software ; }, abstract = {Nowadays, progress in the determination of three-dimensional macromolecular structures from diffraction images is achieved partly at the cost of increasing data volumes. This is due to the deployment of modern high-speed, high-resolution detectors, the increased complexity and variety of crystallographic software, the use of extensive databases and high-performance computing. This limits what can be accomplished with personal, offline computing equipment in terms of both productivity and maintainability. There is also an issue of long-term data maintenance and availability of structure-solution projects as the links between experimental observations and the final results deposited in the PDB. In this article, CCP4 Cloud, a new front-end of the CCP4 software suite, is presented which mitigates these effects by providing an online, cloud-based environment for crystallographic computation. CCP4 Cloud was developed for the efficient delivery of computing power, database services and seamless integration with web resources. It provides a rich graphical user interface that allows project sharing and long-term storage for structure-solution projects, and can be linked to data-producing facilities.
The system is distributed with the CCP4 software suite version 7.1 and higher, and an online publicly available instance of CCP4 Cloud is provided by CCP4.}, } @article {pmid36046635, year = {2022}, author = {Nickel, S and Bremer, K and Dierks, ML and Haack, M and Wittmar, S and Borgetto, B and Kofahl, C}, title = {Digitization in health-related self-help - Results of an online survey among self-help organizations in Germany.}, journal = {Digital health}, volume = {8}, number = {}, pages = {20552076221120726}, pmid = {36046635}, issn = {2055-2076}, abstract = {BACKGROUND: Nowadays, great hopes and expectations are associated with digitization in the health sector. The digital change also affects health-related self-help. A nationwide survey of self-help organizations (SHOs) aimed to show the opportunities and limitations in the use of interactive IT tools like web forums, online meetings, or social media, as well as digital infrastructures for their organizational management. In this survey, we also determined whether SHO staff themselves have support and qualification needs with regard to this topic.

DESIGN: The online survey was conducted between 14 November and 8 December 2019, i.e., immediately before the outbreak of the Covid-19 pandemic. The questionnaire comprised 50 questions with 180 single items and could be answered in 30-40 min. After two reminder letters, 119 questionnaires from the SHOs were gathered and analysed.

RESULTS: SHOs already have a lot of experience with digital media/tools (e.g., their own homepage, social media, cloud computing). Some tools are rated as having a "high" or "very high" benefit by more than 80% of users. The perceived benefits, however, come up against a number of problems, ranging from a lack of resources to data protection issues. Despite, or even because of, the limits of digitization, there is a great desire and need for support and further training in SHOs (and self-help groups).

CONCLUSIONS: At many points in the survey it was shown that digital media can be a useful extension of "traditional" collective self-help. Taking into account the risks and limitations associated with digital tools, SHOs can be central stakeholders in digitization in health-related self-help.

The study was financially supported by the Federal Ministry of Health, Germany. A detailed representation of the results is publicly available at: https://www.uke.de/dish.}, } @article {pmid36035822, year = {2022}, author = {Zala, K and Thakkar, HK and Jadeja, R and Dholakia, NH and Kotecha, K and Jain, DK and Shukla, M}, title = {On the Design of Secured and Reliable Dynamic Access Control Scheme of Patient E-Healthcare Records in Cloud Environment.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {3804553}, pmid = {36035822}, issn = {1687-5273}, mesh = {*Computer Security ; Confidentiality ; Delivery of Health Care ; Humans ; Privacy ; *Telemedicine ; }, abstract = {Traditional healthcare services have changed into modern ones in which doctors can diagnose patients from a distance. All stakeholders, including patients, ward boys, life insurance agents, physicians, and others, have easy access to patients' medical records due to cloud computing. The cloud's services are very cost-effective and scalable and provide various mobile access options for a patient's electronic health records (EHRs). EHR privacy and security are critical concerns despite the many benefits of the cloud. Patient health information is extremely sensitive and important, and sending it over an unencrypted wireless medium raises a number of security hazards. This study suggests an innovative and secure access system for cloud-based electronic healthcare services storing patient health records with a third-party cloud service provider. The research considers the remote healthcare requirements for maintaining patient information integrity, confidentiality, and security. There will be fewer attacks on e-healthcare records now that stakeholders will have a safe interface and data on the cloud will not be accessible to them. In the proposed scheme, end-to-end encryption is ensured by using multiple keys generated by the key conclusion function (KCF), and access to cloud services is granted based on a person's identity and the relationship between the parties involved, which protects their personal information. The proposed scheme is best suited for cloud-based e-healthcare services because of its simplicity and robustness. Using different Amazon EC2 hosting options, we examine how well our cloud-based web application service works as the number of requests increases linearly. The performance of our web application service running in the cloud is measured by how many requests it can handle per second while keeping its response time constant. The proposed secure access scheme for cloud-based web applications was compared to the Ethereum blockchain platform, which uses Internet of Things (IoT) devices, in terms of execution time, throughput, and latency.}, } @article {pmid36033780, year = {2022}, author = {Deng, C and Yu, Q and Luo, G and Zhao, Z and Li, Y}, title = {Big data-driven intelligent governance of college students' physical health: System and strategy.}, journal = {Frontiers in public health}, volume = {10}, number = {}, pages = {924025}, pmid = {36033780}, issn = {2296-2565}, mesh = {*Artificial Intelligence ; *Big Data ; Exercise ; Humans ; Students ; Surveys and Questionnaires ; }, abstract = {With the development of information technology, the application of a new generation of information technologies, such as big data, Internet Plus, and artificial intelligence, in the sports field is an emerging, novel trend.
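The encryption layer in the Zala et al. entry above (multiple derived keys plus AES for end-to-end protection) can be illustrated with standard primitives. The sketch below substitutes a PBKDF2 key derivation for the paper's key conclusion function (KCF), whose internals the abstract does not specify, so the identity string and parameters are illustrative only:

    import os
    from cryptography.hazmat.primitives import hashes
    from cryptography.hazmat.primitives.ciphers.aead import AESGCM
    from cryptography.hazmat.primitives.kdf.pbkdf2 import PBKDF2HMAC

    # Derive a per-stakeholder key from identity + relationship context
    # (stand-in for the KCF; PBKDF2 is a conventional choice, not the paper's).
    salt = os.urandom(16)
    kdf = PBKDF2HMAC(algorithm=hashes.SHA256(), length=32, salt=salt,
                     iterations=600_000)
    key = kdf.derive(b"patient42|cardiologist")     # hypothetical identity string

    aes = AESGCM(key)
    nonce = os.urandom(12)
    record = b'{"patient": 42, "bp": "120/80"}'     # toy EHR payload
    ciphertext = aes.encrypt(nonce, record, associated_data=b"ehr-v1")
    assert aes.decrypt(nonce, ciphertext, b"ehr-v1") == record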
This paper examined the relevant research results and literature on physical education, computer science, pedagogy, management, and other disciplines, then used a self-made questionnaire to investigate the physical health status of Chinese college students. The big data were subsequently analyzed, which provided a scientific basis for the construction of an intelligent governance system for college students' physical health. Intelligent devices may be used to obtain big data resources, track the physical development and psychological status of college students, and push personalized sports prescriptions to solve the problems existing in college students' physical health. Research shows that there are four reasons for the continuous decline in Chinese college students' physical health levels. These are students' lack of positive exercise consciousness and healthy sports values (85.43%), a weak family sports concept and lack of physical exercise habits (62.76%), poor implementation of school sports policies (55.35%), and people's distorted sports value orientation (42.27%). Through the connecting effect of data, we can bring together the positive roles of the government, school, society, family, and students so as to create an interlinked impact to promote students' physical health. The problems of insufficient platform utilization, lack of teaching resources, lagging research, and insufficient combination with big data in the intelligent governance of the physical health of Chinese college students can be solved by building an intelligent governance system of physical health. Such a system would be composed of school infrastructure, data resources and technology processing, and intelligent service applications. Among these, school infrastructure refers to the material foundation and technical support. The material foundation includes perception, storage, computing, network, and other equipment, and the technical support includes cloud computing, mobile Internet, the Internet of Things, artificial intelligence, and deep learning. Data resources refer to smart data, such as stadium data, physical health management data, and students' sports behavior data, which are mined from sources such as students' physical development, physical health, and sports through big data technology and intelligent wearable devices. Intelligent managers provide efficient, accurate, and personalized intelligent sports services for college students through data resource value mining, venue space-time optimization, health knowledge discovery, sports prescription pushes, etc. Finally, we put forward the development strategy for further deepening and improving the big data-driven intelligent governance system for college students' physical health.
The intelligent governance system of physical health driven by big data and its development strategy can not only accurately guide and improve the physical health level of college students but also realize integrated teaching inside and outside physical education classes.}, } @article {pmid36033031, year = {2022}, author = {Liu, Y and Chen, L and Yao, Z}, title = {The application of artificial intelligence assistant to deep learning in teachers' teaching and students' learning processes.}, journal = {Frontiers in psychology}, volume = {13}, number = {}, pages = {929175}, pmid = {36033031}, issn = {1664-1078}, abstract = {With the emergence of big data, cloud computing, and other technologies, artificial intelligence (AI) technology has set off a new wave in the field of education. The application of AI technology to deep learning in university teachers' teaching and students' learning processes is an innovative way to promote the quality of teaching and learning. This study proposed the deep learning-based assessment to measure whether students experienced an improvement in terms of their mastery of knowledge, development of abilities, and emotional experiences. It also used comparative analysis of pre-tests and post-tests through online questionnaires to test the results. The study analyzed the impact of technology on teachers' teaching and students' learning processes, identified the problems in the teaching and learning processes in the context of the application of AI technology, and proposed strategies for reforming and optimizing teaching and learning. It recommends the application of software and platforms, such as Watson and Knewton, under the orientation of AI technology to improve efficiency in teaching and learning, optimize course design, and engage students in deep learning. The contribution of this research is that the teaching and learning processes will be enhanced by the use of intelligent and efficient teaching models on the teachers' side and personalized and in-depth learning on the students' side. On the one hand, the findings are helpful for teachers to better grasp the actual conditions of in-class teaching in real time, carry out intelligent lesson preparations, enrich teaching methods, improve teaching efficiency, and achieve personalized and precision teaching. On the other hand, it also provides a space of intelligent support for students with different traits in terms of learning and effectively improves students' innovation ability, ultimately achieving the purpose of "artificial intelligence + education."}, } @article {pmid36032802, year = {2022}, author = {Mi, J and Sun, X and Zhang, S and Liu, N}, title = {Residential Environment Pollution Monitoring System Based on Cloud Computing and Internet of Things.}, journal = {International journal of analytical chemistry}, volume = {2022}, number = {}, pages = {1013300}, pmid = {36032802}, issn = {1687-8760}, abstract = {In order to solve the problems of a single monitoring factor, weak comprehensive analysis ability, and poor real-time performance in traditional environmental monitoring systems, a residential environment pollution monitoring system based on cloud computing and the Internet of Things is proposed. The method mainly includes two parts: an environmental monitoring terminal and an environmental pollution monitoring and management platform. Through the Wi-Fi module, the data is sent to the environmental pollution monitoring and management platform in real time.
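The terminal-to-platform path in the Mi et al. entry above is essentially a sample-and-POST loop over Wi-Fi. A minimal stand-in using only the Python standard library (the endpoint URL, payload fields, and 1 s cadence are assumptions for illustration, not the paper's protocol):

    import json
    import random          # stand-in for real sensor drivers
    import time
    import urllib.request

    PLATFORM_URL = "http://monitoring.example.org/api/readings"   # hypothetical endpoint

    def read_sensors():
        # Stand-in for the terminal's PM2.5 / gas / noise drivers.
        return {"pm25": random.uniform(5, 80), "noise_db": random.uniform(30, 70),
                "gas_ppm": random.uniform(0, 3), "ts": time.time()}

    def push_reading(reading):
        req = urllib.request.Request(
            PLATFORM_URL, data=json.dumps(reading).encode(),
            headers={"Content-Type": "application/json"}, method="POST")
        with urllib.request.urlopen(req, timeout=5) as resp:
            return resp.status

    while True:                     # the terminal loops: sample, send, sleep
        try:
            push_reading(read_sensors())
        except OSError:
            pass                    # buffering/retry logic would go here
        time.sleep(1.0)             # ~1 s cadence matches the reported update times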
The environmental pollution monitoring and management platform is mainly composed of an environmental pollution monitoring server, a web server, and mobile terminals. The results are as follows. The data measured by the system are close to the data measured by reference instruments, and the overall error is small: the measurement error for harmful gases is about 6%, for PM2.5 about 6.5%, and for noise about 1%. The average time for a sensor data update is 0.762 s. The average alarm response time is 2 s. The average data transfer time is 2 s. Practice has proved that the environmental pollution monitoring and alarm system operates stably and can realize real-time collection and transmission of data such as noise, PM2.5, harmful gas concentration, illumination, GPS, and video images, providing a reliable guarantee for timely environmental pollution control.}, } @article {pmid36017455, year = {2022}, author = {Venkateswarlu, Y and Baskar, K and Wongchai, A and Gauri Shankar, V and Paolo Martel Carranza, C and Gonzáles, JLA and Murali Dharan, AR}, title = {An Efficient Outlier Detection with Deep Learning-Based Financial Crisis Prediction Model in Big Data Environment.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {4948947}, pmid = {36017455}, issn = {1687-5273}, mesh = {Algorithms ; *Big Data ; Cloud Computing ; *Deep Learning ; Machine Learning ; }, abstract = {As Big Data, Internet of Things (IoT), cloud computing (CC), and other ideas and technologies are combined for social interactions, big data technologies improve the treatment of financial data for businesses. At present, an effective tool can be used to forecast the financial failures and crises of small and medium-sized enterprises. Financial crisis prediction (FCP) plays a major role in a country's economy. Accurate forecasting of the number and probability of failures is an indication of the development and strength of national economies. Normally, distinct approaches are planned for an effective FCP. However, classifier efficiency, predictive accuracy, and data legality may not be optimal for practical application. In this view, this study develops an oppositional ant lion optimizer-based feature selection with a machine learning-enabled classification (OALOFS-MLC) model for FCP in a big data environment. For big data management in the financial sector, the Hadoop MapReduce tool is used. In addition, the presented OALOFS-MLC model designs a new OALOFS algorithm to choose an optimal subset of features, which helps to achieve improved classification results. Furthermore, the deep random vector functional links network (DRVFLN) model is used to perform the grading process.
Experimental validation of the OALOFS-MLC approach was conducted using a baseline dataset and the results demonstrated the superiority of the OALOFS-MLC algorithm over recent approaches.}, } @article {pmid36016907, year = {2022}, author = {Reyes-Muñoz, P and Pipia, L and Salinero-Delgado, M and Belda, S and Berger, K and Estévez, J and Morata, M and Rivera-Caicedo, JP and Verrelst, J}, title = {Quantifying Fundamental Vegetation Traits over Europe Using the Sentinel-3 OLCI Catalogue in Google Earth Engine.}, journal = {Remote sensing}, volume = {14}, number = {6}, pages = {1347}, pmid = {36016907}, issn = {2072-4292}, support = {755617/ERC_/European Research Council/International ; }, abstract = {Thanks to the emergence of cloud-computing platforms and the ability of machine learning methods to solve prediction problems efficiently, this work presents a workflow to automate spatiotemporal mapping of essential vegetation traits from Sentinel-3 (S3) imagery. The traits included leaf chlorophyll content (LCC), leaf area index (LAI), fraction of absorbed photosynthetically active radiation (FAPAR), and fractional vegetation cover (FVC), being fundamental for assessing photosynthetic activity on Earth. The workflow involved Gaussian process regression (GPR) algorithms trained on top-of-atmosphere (TOA) radiance simulations generated by the coupled canopy radiative transfer model (RTM) SCOPE and the atmospheric RTM 6SV. The retrieval models, named S3-TOA-GPR-1.0, were directly implemented in Google Earth Engine (GEE) to enable the quantification of the traits from TOA data as acquired from the S3 Ocean and Land Colour Instrument (OLCI) sensor. Following good to high theoretical validation results with normalized root mean square error (NRMSE) ranging from 5% (FAPAR) to 19% (LAI), a three-fold evaluation approach over diverse sites and land cover types was pursued: (1) temporal comparison against LAI and FAPAR products obtained from Moderate Resolution Imaging Spectroradiometer (MODIS) for the time window 2016-2020, (2) spatial difference mapping with Copernicus Global Land Service (CGLS) estimates, and (3) direct validation using interpolated in situ data from the VALERI network. For all three approaches, promising results were achieved. Selected sites demonstrated coherent seasonal patterns compared to LAI and FAPAR MODIS products, with differences between spatially averaged temporal patterns of only 6.59%. With respect to the spatial mapping comparison, estimates provided by the S3-TOA-GPR-1.0 models indicated the highest consistency with FVC and FAPAR CGLS products. Moreover, the direct validation of our S3-TOA-GPR-1.0 models against VALERI estimates indicated good retrieval performance for LAI, FAPAR and FVC.
We conclude that our retrieval workflow of spatiotemporal S3 TOA data processing in GEE opens the path towards global monitoring of fundamental vegetation traits, accessible to the whole research community.}, } @article {pmid36016060, year = {2022}, author = {Thilakarathne, NN and Bakar, MSA and Abas, PE and Yassin, H}, title = {A Cloud Enabled Crop Recommendation Platform for Machine Learning-Driven Precision Farming.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {16}, pages = {}, pmid = {36016060}, issn = {1424-8220}, mesh = {*Agriculture ; *Artificial Intelligence ; Crops, Agricultural ; Farms ; Machine Learning ; }, abstract = {Modern agriculture has incorporated a portfolio of technologies to meet the current demand for agricultural food production, in terms of both quality and quantity. In this technology-driven farming era, this portfolio of technologies has aided farmers in overcoming many of the challenges associated with their farming activities by enabling precise and timely decision making on the basis of data that are observed and subsequently converged. In this regard, Artificial Intelligence (AI) holds a key place, whereby it can assist key stakeholders in making precise decisions regarding the conditions on their farms. Machine Learning (ML), which is a branch of AI, enables systems to learn and improve from their experience without being explicitly programmed, by imitating intelligent behavior in solving tasks in a manner that requires low computational power. For the time being, ML is involved in a variety of aspects of farming, assisting farmers in making smarter decisions on the basis of the observed data. In this study, we provide an overview of AI-driven precision farming/agriculture with related work and then propose a novel cloud-based ML-powered crop recommendation platform to assist farmers in deciding which crops need to be harvested based on a variety of known parameters. Moreover, in this paper, we compare five predictive ML algorithms-K-Nearest Neighbors (KNN), Decision Tree (DT), Random Forest (RF), Extreme Gradient Boosting (XGBoost) and Support Vector Machine (SVM)-to identify the best-performing ML algorithm on which to build our recommendation platform as a cloud-based service, with the intention of offering precision farming solutions that are free and open source, which will lead to the growth and adoption of precision farming solutions in the long run.}, } @article {pmid36016017, year = {2022}, author = {Rocha Filho, GP and Brandão, AH and Nobre, RA and Meneguette, RI and Freitas, H and Gonçalves, VP}, title = {HOsT: Towards a Low-Cost Fog Solution via Smart Objects to Deal with the Heterogeneity of Data in a Residential Environment.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {16}, pages = {}, pmid = {36016017}, issn = {1424-8220}, mesh = {*Environment ; }, abstract = {With the fast and unstoppable development of technology, the number of available technological devices and the data they produce are overwhelming. In the context of a smart home, a diverse group of intelligent devices constantly reporting environmental information is needed for the proper control of the house. Due to this demand, many possible solutions have been developed in the literature to address the need for processing power and storage capacity.
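The five-algorithm comparison in the Thilakarathne et al. entry above is, in outline, a cross-validated model bake-off. A compact sketch with scikit-learn on synthetic features (GradientBoostingClassifier stands in for XGBoost to keep the example dependency-free; the data and scores are illustrative):

    from sklearn.datasets import make_classification
    from sklearn.ensemble import GradientBoostingClassifier, RandomForestClassifier
    from sklearn.model_selection import cross_val_score
    from sklearn.neighbors import KNeighborsClassifier
    from sklearn.svm import SVC
    from sklearn.tree import DecisionTreeClassifier

    # synthetic stand-in for soil/climate features -> suitable crop class
    X, y = make_classification(n_samples=600, n_features=7, n_classes=4,
                               n_informative=5, random_state=0)

    models = {
        "KNN": KNeighborsClassifier(),
        "DT": DecisionTreeClassifier(random_state=0),
        "RF": RandomForestClassifier(random_state=0),
        "GB": GradientBoostingClassifier(random_state=0),  # XGBoost stand-in
        "SVM": SVC(),
    }
    for name, model in models.items():
        scores = cross_val_score(model, X, y, cv=5)
        print(f"{name}: {scores.mean():.3f} +/- {scores.std():.3f}")

The winner of such a comparison is then wrapped as the cloud-hosted prediction service.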
This work proposes HOsT (home-context-aware fog-computing solution)-a solution that addresses the problems of data heterogeneity and the interoperability of smart objects in the context of a smart home. HOsT was modeled to compose a set of intelligent objects into a computational infrastructure in the fog. A publish/subscribe communication module was implemented to abstract the details of communication between objects to disseminate heterogeneous information. A performance evaluation was carried out to validate HOsT. The results show evidence of the efficiency of the communication infrastructure and of the impact of HOsT compared with a cloud infrastructure. Furthermore, HOsT provides scalability with respect to the number of devices acting simultaneously and demonstrates its ability to work with different devices.}, } @article {pmid36016014, year = {2022}, author = {Bemani, A and Björsell, N}, title = {Aggregation Strategy on Federated Machine Learning Algorithm for Collaborative Predictive Maintenance.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {16}, pages = {}, pmid = {36016014}, issn = {1424-8220}, support = {20202943//Region Gavleborg/ ; 20203291//EU - Tillvax Verket/ ; }, mesh = {*Algorithms ; Computer Simulation ; *Machine Learning ; Privacy ; Support Vector Machine ; }, abstract = {Industry 4.0 lets the industry build compact, precise, and connected assets and has also made modern industrial assets a massive source of data that can be used in process optimization, defining product quality, and predictive maintenance (PM). Large amounts of data are collected from machines, processed, and analyzed by different machine learning (ML) algorithms to achieve effective PM. These machines, assumed to be edge devices, transmit their data readings to the cloud for processing and modeling. Transmitting massive amounts of data between edge and cloud is costly, increases latency, and causes privacy concerns. To address this issue, efforts have been made to use edge computing in PM applications, reducing data transmission costs and increasing processing speed. Federated learning (FL) has been proposed as a mechanism that provides the ability to create a model from distributed data in edge, fog, and cloud layers without violating privacy, and it offers new opportunities for a collaborative approach to PM applications. However, FL faces challenges in confronting asset management in the industry, especially in PM applications, which need to be considered in order for it to be fully compatible with these applications. This study describes distributed ML for PM applications and proposes two federated algorithms: federated support vector machine (FedSVM) with memory for anomaly detection and federated long short-term memory (FedLSTM) for remaining useful life (RUL) estimation, which enable factories at the fog level to maximize their PM models' accuracy without compromising their privacy. A global model at the cloud level has also been generated based on these algorithms.
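As a simplified illustration of this cloud-level aggregation, the following FedAvg-style sketch averages client parameters weighted by local dataset size; the array shapes, client counts, and values are invented, and the paper's FedSVM/FedLSTM aggregation may differ in detail.

import numpy as np

def federated_average(client_weights, client_sizes):
    # Weight each client's parameters by its share of the total training data.
    total = float(sum(client_sizes))
    return sum(w * (n / total) for w, n in zip(client_weights, client_sizes))

# Three fog-level sites share locally trained parameter vectors (toy values).
clients = [np.array([0.20, 1.10]), np.array([0.40, 0.90]), np.array([0.30, 1.00])]
sizes = [1000, 500, 1500]
global_params = federated_average(clients, sizes)   # cloud-level global model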
We have evaluated the approach using the Commercial Modular Aero-Propulsion System Simulation (CMAPSS) dataset to predict engines' RUL. Experimental results demonstrate the advantage of FedSVM and FedLSTM in terms of model accuracy, model convergence time, and network resource usage.}, } @article {pmid36015736, year = {2022}, author = {Chen, YS and Cheng, KH and Hsu, CS and Zhang, HL}, title = {MiniDeep: A Standalone AI-Edge Platform with a Deep Learning-Based MINI-PC and AI-QSR System.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {16}, pages = {}, pmid = {36015736}, issn = {1424-8220}, support = {MOST 109-2221-E-305-004-MY3//Ministry of Science and Technology, Taiwan/ ; }, mesh = {*Artificial Intelligence ; *Deep Learning ; Software ; }, abstract = {In this paper, we present a new AI (Artificial Intelligence) edge platform, called "MiniDeep", which provides a standalone deep learning platform based on the cloud-edge architecture. This AI-Edge platform provides developers with a whole deep learning development environment to set up their deep learning life cycle processes, such as model training, model evaluation, model deployment, model inference, ground truth collecting, data pre-processing, and training data management. To the best of our knowledge, such a whole deep learning development environment has not been built before. MiniDeep uses Amazon Web Services (AWS) as the backend platform of a deep learning tuning management model. In the edge device, OpenVINO enables deep learning inference acceleration at the edge. To perform a deep learning life cycle job, MiniDeep proposes a mini deep life cycle (MDLC) system which is composed of several microservices from the cloud to the edge. MiniDeep provides Train Job Creator (TJC) for training dataset management and the models' training schedule and Model Packager (MP) for model package management. All of them are based on several AWS cloud services. On the edge device, MiniDeep provides Inference Handler (IH) to handle deep learning inference by hosting RESTful API (Application Programming Interface) requests/responses from the end device. Data Provider (DP) is responsible for ground truth collection and dataset synchronization for the cloud. With the deep learning ability, this paper uses the MiniDeep platform to implement a recommendation system for an AI-QSR (Quick Service Restaurant) KIOSK (interactive kiosk) application. AI-QSR uses the MiniDeep platform to train an LSTM (Long Short-Term Memory)-based recommendation system. The LSTM-based recommendation system converts KIOSK UI (User Interface) flow to a flow sequence and performs sequential recommendations with food suggestions. At the end of this paper, the efficiency of the proposed MiniDeep is verified through real experiments.
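As a rough sketch of what an edge-side inference handler serving RESTful requests (in the spirit of MiniDeep's IH component) could look like in Python with Flask; the route, payload fields, and model stub below are hypothetical, not MiniDeep's actual API.

from flask import Flask, jsonify, request

app = Flask(__name__)

def run_local_inference(ui_flow_sequence):
    # Placeholder for an on-device (e.g., OpenVINO-accelerated) model call.
    return {"recommended_item": "combo_3", "score": 0.87}

@app.route("/infer", methods=["POST"])
def infer():
    payload = request.get_json(force=True)
    return jsonify(run_local_inference(payload.get("ui_flow_sequence", [])))

if __name__ == "__main__":
    app.run(host="0.0.0.0", port=8080)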
The experimental results demonstrate that the proposed LSTM-based scheme performs better than the rule-based scheme in terms of purchase hit accuracy, categorical cross-entropy, precision, recall, and F1 score.}, } @article {pmid36015727, year = {2022}, author = {Alzahrani, A and Alyas, T and Alissa, K and Abbas, Q and Alsaawy, Y and Tabassum, N}, title = {Hybrid Approach for Improving the Performance of Data Reliability in Cloud Storage Management.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {16}, pages = {}, pmid = {36015727}, issn = {1424-8220}, mesh = {*Cloud Computing ; Computers ; *Information Storage and Retrieval ; Reproducibility of Results ; }, abstract = {The digital transformation disrupts the various professional domains in different ways, though one aspect is common: the unified platform known as cloud computing. Corporate solutions, IoT systems, analytics, business intelligence, and numerous tools, solutions and systems use cloud computing as a global platform. Migrations to the cloud are increasing, causing it to face new challenges and complexities. One of the essential segments is related to data storage. Data storage on the cloud is neither simplistic nor conventional; rather, it is becoming more and more complex due to the versatility and volume of data. This research develops a framework that provides a comprehensive solution for cloud storage in terms of replication; instead of formal recovery channels, erasure coding is proposed for this framework, having proved itself in the past as a trustworthy mechanism for the job. The proposed framework provides a hybrid approach to combine the benefits of replication and erasure coding to attain the optimal solution for storage, specifically focused on reliability and recovery. Learning and training mechanisms were developed to provide dynamic structure building in the future and test the data model. RAID architecture is used to formulate different configurations for the experiments. RAID-1 to RAID-6 are divided into two groups, with RAID-1 to 4 in the first group while RAID-5 and 6 are in the second group, further categorized based on FTT, parity, failure range and capacity. Reliability and recovery are evaluated on the rest of the data on the server side, and for the data in transit at the virtual level. The overall results show the significant impact of the proposed hybrid framework on cloud storage performance. RAID-6c at the server side came out as the best configuration for optimal performance. Mirroring for replication using RAID-6 and erasure coding for recovery work in complete coherence and provide good results for the current framework, while highlighting interesting and challenging paths for future research.}, } @article {pmid36015699, year = {2022}, author = {Lakhan, A and Mohammed, MA and Abdulkareem, KH and Jaber, MM and Nedoma, J and Martinek, R and Zmij, P}, title = {Delay Optimal Schemes for Internet of Things Applications in Heterogeneous Edge Cloud Computing Networks.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {16}, pages = {}, pmid = {36015699}, issn = {1424-8220}, support = {SP2022/18 and No.
SP2022/34//Ministry of Education Youth and Sports/ ; CZ.02.1.01/0.0/0.0/17049/0008425//European Regional Development Fund in Research Platform focused on Industry 4.0 and Robotics in Ostrava/ ; }, mesh = {*Cloud Computing ; Delivery of Health Care ; *Internet of Things ; }, abstract = {Over the last decade, the usage of Internet of Things (IoT)-enabled applications, such as healthcare, intelligent vehicles, and smart homes, has increased progressively. These IoT applications generate delay-sensitive data and require quick resources for execution. Recently, software-defined networks (SDN) offer an edge computing paradigm (e.g., fog computing) to run these applications with minimum end-to-end delays. Offloading and scheduling are promising schemes of edge computing to run delay-sensitive IoT applications while satisfying their requirements. However, in the dynamic environment, existing offloading and scheduling techniques are not ideal and decrease the performance of such applications. This article formulates the joint offloading and scheduling problem as a combinatorial integer linear program (CILP). We propose a joint task offloading and scheduling (JTOS) framework based on this formulation. JTOS consists of task offloading, sequencing, scheduling, searching, and failure components. The study's goal is to minimize the hybrid delay of all applications. The performance evaluation shows that JTOS outperforms all existing baseline methods in hybrid delay for all applications in the dynamic environment, reducing the processing delay by 39% and the communication delay by 35% for IoT applications compared to existing schemes.}, } @article {pmid36009026, year = {2022}, author = {Lin, PC and Tsai, YS and Yeh, YM and Shen, MR}, title = {Cutting-Edge AI Technologies Meet Precision Medicine to Improve Cancer Care.}, journal = {Biomolecules}, volume = {12}, number = {8}, pages = {}, pmid = {36009026}, issn = {2218-273X}, mesh = {Artificial Intelligence ; Computational Biology/methods ; Data Mining ; Genomics/methods ; Humans ; *Neoplasms/diagnosis/genetics/therapy ; *Precision Medicine/methods ; }, abstract = {To provide precision medicine for better cancer care, researchers must work on clinical patient data, such as electronic medical records, physiological measurements, biochemistry, computerized tomography scans, digital pathology, and the genetic landscape of cancer tissue. To interpret big biodata in cancer genomics, an operational flow based on artificial intelligence (AI) models and medical management platforms with high-performance computing must be set up for precision cancer genomics in clinical practice. To work in the fast-evolving fields of patient care, clinical diagnostics, and therapeutic services, clinicians must understand the fundamentals of the AI tool approach. Therefore, the present article covers the following five themes: (i) computational prediction of pathogenic variants of cancer susceptibility genes; (ii) AI model for mutational analysis; (iii) single-cell genomics and computational biology; (iv) text mining for identifying gene targets in cancer; and (v) the NVIDIA graphics processing units, DRAGEN field programmable gate arrays systems and AI medical cloud platforms in clinical next-generation sequencing laboratories. Based on AI medical platforms and visualization, large amounts of clinical biodata can be rapidly copied and understood using an AI pipeline.
The use of innovative AI technologies can deliver more accurate and rapid cancer therapy targets.}, } @article {pmid35996679, year = {2023}, author = {Alsalemi, A and Amira, A and Malekmohamadi, H and Diao, K}, title = {Lightweight Gramian Angular Field classification for edge internet of energy applications.}, journal = {Cluster computing}, volume = {26}, number = {2}, pages = {1375-1387}, pmid = {35996679}, issn = {1386-7857}, abstract = {UNLABELLED: With adverse industrial effects on the global landscape, climate change is compelling the global economy to adopt sustainable solutions. The ongoing evolution of energy efficiency targets massive data collection and Artificial Intelligence (AI) for big data analytics. Moreover, within the emerging Internet of Energy (IoE) paradigm, edge computing is playing a rising role in liberating private data from cloud centralization. In this direction, a creative visual approach to understanding energy data is introduced. Building upon micro-moments, which are time series of small contextual data points, the power of pictorial representations to encapsulate rich information in a small two-dimensional (2D) space is harnessed through a novel Gramian Angular Fields (GAF) classifier for energy micro-moments. Designed with edge computing efficiency in mind, current testing results on the ODROID-XU4 can classify up to 7 million GAF-converted datapoints with ~90% accuracy in less than 30 s, paving the path towards industrial adoption of edge IoE.
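A minimal NumPy sketch of the Gramian Angular (summation) Field encoding behind such a classifier: a 1D series is rescaled to [-1, 1], mapped to angles, and expanded into a 2D image. The sample values are invented for illustration.

import numpy as np

def gasf(series):
    x = np.asarray(series, dtype=float)
    x = 2.0 * (x - x.min()) / (x.max() - x.min()) - 1.0   # rescale to [-1, 1]
    phi = np.arccos(x)                                    # polar (angular) encoding
    return np.cos(phi[:, None] + phi[None, :])            # (n, n) summation field

image = gasf([0.10, 0.40, 0.35, 0.90, 0.70, 0.20])   # toy energy micro-moments
print(image.shape)   # (6, 6) -> ready for a small 2D CNN classifier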

SUPPLEMENTARY INFORMATION: The online version contains supplementary material available at 10.1007/s10586-022-03704-1.}, } @article {pmid35994872, year = {2022}, author = {Yeung, S and Kim, HK and Carleton, A and Munro, J and Ferguson, D and Monk, AP and Zhang, J and Besier, T and Fernandez, J}, title = {Integrating wearables and modelling for monitoring rehabilitation following total knee joint replacement.}, journal = {Computer methods and programs in biomedicine}, volume = {225}, number = {}, pages = {107063}, doi = {10.1016/j.cmpb.2022.107063}, pmid = {35994872}, issn = {1872-7565}, mesh = {*Arthroplasty, Replacement, Knee/rehabilitation ; Biomechanical Phenomena ; Gait ; Humans ; Knee Joint/surgery ; *Knee Prosthesis ; *Wearable Electronic Devices ; }, abstract = {BACKGROUND AND OBJECTIVE: Wearable inertial devices integrated with modelling and cloud computing have been widely adopted in the sports sector, however, their use in the health and medical field has yet to be fully realised. To date, there have been no reported studies concerning the use of wearables as a surrogate tool to monitor knee joint loading during recovery following a total knee joint replacement. The objective of this study is to firstly evaluate if peak tibial acceleration from wearables during gait is a good surrogate metric for computer modelling predicted functional knee loading; and secondly evaluate if traditional clinical patient related outcomes measures are consistent with wearable predictions.

METHODS: Following ethical approval, four healthy participants were used to establish the relationship between computer modelling predicted knee joint loading and wearable measured tibial acceleration. Following this, ten patients who had total knee joint replacements were then followed during their 6-week rehabilitation. Gait analysis, wearable acceleration, computer models of knee joint loading, and patient related outcome measures, including the Oxford knee score and range of motion, were recorded.

RESULTS: A linear correlation (R[2] of 0.7-0.97) was observed between peak tibial acceleration (from wearables) and musculoskeletal model predicted knee joint loading during gait, first in healthy participants. Whilst patient related outcome measures (Oxford knee score and patient range of motion) were observed to improve consistently during rehabilitation, this was not consistent with all patients' tibial acceleration. Only those patients who exhibited increasing peak tibial acceleration over the 6-week rehabilitation were positively correlated with the Oxford knee score (R[2] of 0.51 to 0.97). Wearable predicted tibial acceleration revealed three patients with consistent knee loading, five patients with improving knee loading, and two patients with declining knee loading during recovery. Hence, 20% of patients did not present with satisfactory joint loading following total knee joint replacement, and this was not detected with current patient related outcome measures.

CONCLUSIONS: The use of inertial measurement units or wearables in this study provided additional insight into patients who were not exhibiting functional improvements in joint loading, and offers clinicians an 'off-site' early warning metric to identify potential complications during recovery and provide the opportunity for early intervention. This study has important implications for improving patient outcomes, equity, and for those who live in rural regions.}, } @article {pmid35992348, year = {2022}, author = {Xu, J and Xu, Z and Shi, B}, title = {Deep Reinforcement Learning Based Resource Allocation Strategy in Cloud-Edge Computing System.}, journal = {Frontiers in bioengineering and biotechnology}, volume = {10}, number = {}, pages = {908056}, pmid = {35992348}, issn = {2296-4185}, abstract = {The rapid development of mobile device applications puts tremendous pressure on edge nodes with limited computing capabilities, which may cause poor user experience. To solve this problem, collaborative cloud-edge computing is proposed. In cloud-edge computing, an edge node with limited local resources can rent more resources from a cloud node. According to the nature of cloud service, cloud service can be divided into private cloud and public cloud. In a private cloud environment, the edge node must allocate resources between the cloud node and the edge node. In a public cloud environment, since public cloud service providers offer various pricing modes for users' different computing demands, the edge node must also select the appropriate pricing mode of cloud service, which is a sequential decision problem. In this study, we model it as a Markov decision process and a parameterized action Markov decision process, and we propose two resource allocation algorithms, cost efficient resource allocation with private cloud (CERAI) and cost efficient resource allocation with public cloud (CERAU), for the collaborative cloud-edge environment, based on the deep reinforcement learning algorithms deep deterministic policy gradient and P-DQN. Next, we evaluated CERAI and CERAU against three typical resource allocation algorithms based on synthetic and real data from Google datasets. The experimental results demonstrate that CERAI and CERAU can effectively reduce the long-term operating cost of collaborative cloud-edge computing in various demanding settings. Our analysis can provide some useful insights for enterprises designing the resource allocation strategy in a collaborative cloud-edge computing system.}, } @article {pmid35991356, year = {2022}, author = {de Oliveira, MEG and da Silva, MV and de Almeida, GLP and Pandorfi, H and Oliveira Lopes, PM and Manrique, DRC and Dos Santos, A and Jardim, AMDRF and Giongo, PR and Montenegro, AAA and da Silva Junior, CA and de Oliveira-Júnior, JF}, title = {Investigation of pre and post environmental impact of the lockdown (COVID-19) on the water quality of the Capibaribe and Tejipió rivers, Recife metropolitan region, Brazil.}, journal = {Journal of South American earth sciences}, volume = {118}, number = {}, pages = {103965}, pmid = {35991356}, issn = {0895-9811}, abstract = {The coronavirus pandemic has seriously affected human health, although some improvements in environmental indexes have temporarily occurred due to changes in socio-cultural and economic standards.
The objective of this study was to evaluate the impacts of the coronavirus and the influence of the lockdown associated with rainfall on the water quality of the Capibaribe and Tejipió rivers, Recife, Northeast Brazil, using cloud remote sensing on the Google Earth Engine (GEE) platform. The study was carried out based on eight representative images from Sentinel-2. Among the selected images, two refer to the year 2019 (before the pandemic), three refer to 2020 (during the pandemic), two are from the lockdown period (2020), and one is from the year 2021. The land use and land cover (LULC) and slope of the study region were determined and classified. Water turbidity data were subjected to descriptive and multivariate statistics. Analysis of the LULC data for the riparian margins of the Capibaribe and Tejipió rivers revealed little permanent preservation area, with urban area predominating at almost 100%, so that the deposition of soil particles into the rivers is minimal. The results indicated that turbidity values in the water bodies varied from 6 mg L[-1] up to 40 mg L[-1]. Overall, the reduction in human-based activities generated by the lockdown enabled improvements in the water quality of these urban rivers.}, } @article {pmid35990146, year = {2022}, author = {Li, J and Liu, L}, title = {The Reform of University Education Teaching Based on Cloud Computing and Big Data Background.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {8169938}, pmid = {35990146}, issn = {1687-5273}, mesh = {*Big Data ; *Cloud Computing ; Humans ; Teaching ; Universities ; }, abstract = {In the era of big data and cloud computing, the traditional college teaching model needs to be revolutionized in order to adapt to the needs of the present generation. The traditional college teaching model is currently facing unprecedented, severe challenges, which could optimistically be considered a huge development opportunity. In order to promote the gradual transformation of college teaching toward digitization, intelligence, and modernization, this paper comprehensively analyzes the impact of science and technology on college teaching. It further encourages the omnidirectional and multifaceted amalgamation of education with big data and cloud computing technology, with the objective of improving the overall teaching level of colleges and universities. In order to realize the accurate evaluation of university teaching reform and improve teaching quality, the study presents an evaluation method for university teaching reform based on a deep learning network. It further analyzes the main contents of university teaching reform, establishes an evaluation framework for university teaching reform, and then builds an evaluation model of university education reform. This is achieved by analyzing the relationship between university education reform and its indicators using a deep learning network, followed by simulation experiments on the evaluation of university education reform.
The results show that this method is helpful in improving teaching quality.}, } @article {pmid35990138, year = {2022}, author = {Zhao, J and Zhang, L and Zhao, Y}, title = {Informatization of Accounting Systems in Small- and Medium-Sized Enterprises Based on Artificial Intelligence-Enabled Cloud Computing.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {6089195}, pmid = {35990138}, issn = {1687-5273}, mesh = {*Artificial Intelligence ; *Cloud Computing ; }, abstract = {Against the backdrop of China's growing market economy, small- and medium-sized enterprises (SMEs) have taken advantage of this opportunity to develop rapidly. At present, SMEs have become an important part of the market economy. An accounting information management system is an advanced form of management, and improving the degree of accounting informatization is the key to improving the management mode of SMEs. This study applies cloud computing to enterprise accounting management systems. The results show that realizing SME accounting information management can effectively improve economic settlements. With the development of cloud computing, its improvement of accounting management efficiency cannot be ignored. Besides addressing the risks of accounting informatization, enterprises can support their development by establishing a secure network protection wall and relying on strict, relevant laws and regulations.}, } @article {pmid35989835, year = {2022}, author = {Datta, PK and Chowdhury, SR and Aravindan, A and Nath, S and Sen, P}, title = {Looking for a Silver Lining to the Dark Cloud: A Google Trends Analysis of Contraceptive Interest in the United States Post Roe vs. Wade Verdict.}, journal = {Cureus}, volume = {14}, number = {7}, pages = {e27012}, pmid = {35989835}, issn = {2168-8184}, abstract = {Background In the wake of the recent Roe vs. Wade judgment, we performed a Google Trends analysis to identify the impact of this decision on the interests regarding contraceptive choices in the United States. Methods A Google Trends search between April 6 and July 5, 2022, with the United States as the area of interest, was performed using the five most popular contraception choices. In addition, a second trend search was performed using oral and injectable hormonal birth control measures. Results Trends showed a spike in interest regarding various contraceptive methods immediately following the verdict. The highest increase in interest was noted for "vasectomy," followed by "tubal ligation." With respect to oral and injectable birth control measures, "morning after pill" showed a marked spike in interest. Conclusion This verdict has triggered increased interest in contraceptive practices, which can be translated into better reproductive health with proper public health initiatives.}, } @article {pmid35978910, year = {2022}, author = {Tang, H and Jiang, G and Wang, Q}, title = {Prediction of College Students' Sports Performance Based on Improved BP Neural Network.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {5872384}, pmid = {35978910}, issn = {1687-5273}, mesh = {Algorithms ; *Athletic Performance ; Humans ; Neural Networks, Computer ; *Students ; Universities ; }, abstract = {Sports performance prediction has gradually become a research hotspot in colleges and universities, which pay increasing attention to the development of college students' comprehensive quality.
Aiming at the problems of low accuracy and slow convergence in existing college students' sports performance prediction models, a method of college students' sports performance prediction based on an improved BP neural network is proposed. First, the students' sports performance data are preprocessed; then a BP neural network is trained on the data samples, with the differential evolution (DE) algorithm optimizing the selection of weights and thresholds in the network to establish an optimal sports performance prediction model; finally, the prediction model is implemented and run on a cloud computing platform, which speeds up the prediction of sports performance. The results show that the model can improve the accuracy of college students' sports performance prediction, provide more reliable prediction results, and provide valuable information for sports training.}, } @article {pmid35974742, year = {2022}, author = {Deumer, J and Pauw, BR and Marguet, S and Skroblin, D and Taché, O and Krumrey, M and Gollwitzer, C}, title = {Small-angle X-ray scattering: characterization of cubic Au nanoparticles using Debye's scattering formula.}, journal = {Journal of applied crystallography}, volume = {55}, number = {Pt 4}, pages = {993-1001}, pmid = {35974742}, issn = {0021-8898}, abstract = {A versatile software package in the form of a Python extension, named CDEF (computing Debye's scattering formula for extraordinary form factors), is proposed to calculate approximate scattering profiles of arbitrarily shaped nanoparticles for small-angle X-ray scattering (SAXS). CDEF generates a quasi-randomly distributed point cloud in the desired particle shape and then applies the open-source software DEBYER for efficient evaluation of Debye's scattering formula to calculate the SAXS pattern (https://github.com/j-from-b/CDEF). If self-correlation of the scattering signal is not omitted, the quasi-random distribution provides faster convergence compared with a true-random distribution of the scatterers, especially at higher momentum transfer. The usage of the software is demonstrated for the evaluation of scattering data of Au nanocubes with rounded edges, which were measured at the four-crystal monochromator beamline of PTB at the synchrotron radiation facility BESSY II in Berlin. The implementation is fast enough to run on a single desktop computer and perform model fits within minutes. The accuracy of the method was analyzed by comparison with analytically known form factors and verified with another implementation, the SPONGE, based on a similar principle with fewer approximations. Additionally, the SPONGE coupled to McSAS3 allows one to retrieve information on the uncertainty of the size distribution using a Monte Carlo uncertainty estimation algorithm.}, } @article {pmid35972790, year = {2022}, author = {Ngu, AH and Metsis, V and Coyne, S and Srinivas, P and Salad, T and Mahmud, U and Chee, KH}, title = {Personalized Watch-Based Fall Detection Using a Collaborative Edge-Cloud Framework.}, journal = {International journal of neural systems}, volume = {32}, number = {12}, pages = {2250048}, doi = {10.1142/S0129065722500484}, pmid = {35972790}, issn = {1793-6462}, mesh = {Humans ; Aged ; *Accidental Falls/prevention & control ; *Smartphone ; Automation ; Software ; }, abstract = {The majority of current smart health applications are deployed on a smartphone paired with a smartwatch.
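For the DE-optimized network idea in the sports-performance abstract above, here is a hedged sketch that uses SciPy's differential evolution to choose the weights of a tiny one-layer model; the data, network size, and bounds are invented for illustration and are far smaller than a real BP network.

import numpy as np
from scipy.optimize import differential_evolution

rng = np.random.default_rng(1)
X = rng.normal(size=(100, 3))                  # toy fitness-test features
y = np.tanh(X @ np.array([0.5, -0.2, 0.8]))    # toy performance scores

def mse(w):
    # w[:3] are input weights, w[3] an output scale, w[4] a bias.
    pred = np.tanh(X @ w[:3]) * w[3] + w[4]
    return np.mean((pred - y) ** 2)

result = differential_evolution(mse, bounds=[(-2.0, 2.0)] * 5, seed=1)
print(result.x, result.fun)   # evolved weights and final training error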
The phone is used as the computation platform or the gateway for connecting to the cloud while the watch is used mainly as the data sensing device. In the case of fall detection applications for older adults, this kind of setup is not very practical since it requires users to always keep their phones in proximity while doing the daily chores. When a person falls, in a moment of panic, it might be difficult to locate the phone in order to interact with the Fall Detection App for the purpose of indicating whether they are fine or need help. This paper demonstrates the feasibility of running a real-time personalized deep-learning-based fall detection system on a smartwatch device using a collaborative edge-cloud framework. In particular, we present the software architecture we used for the collaborative framework, demonstrate how we automate the fall detection pipeline, design an appropriate UI on the small screen of the watch, and implement strategies for the continuous data collection and automation of the personalization process with the limited computational and storage resources of a smartwatch. We also present the usability of such a system with nine real-world older adult participants.}, } @article {pmid35972192, year = {2022}, author = {Poolman, TM and Townsend-Nicholson, A and Cain, A}, title = {Teaching genomics to life science undergraduates using cloud computing platforms with open datasets.}, journal = {Biochemistry and molecular biology education : a bimonthly publication of the International Union of Biochemistry and Molecular Biology}, volume = {50}, number = {5}, pages = {446-449}, pmid = {35972192}, issn = {1539-3429}, mesh = {*COVID-19/epidemiology ; *Cloud Computing ; Genomics ; Humans ; Software ; Students ; }, abstract = {The final year of a biochemistry degree is usually a time to experience research. However, laboratory-based research projects were not possible during COVID-19. Instead, we used open datasets to provide computational research projects in metagenomics to biochemistry undergraduates (80 students with limited computing experience). We aimed to give the students a chance to explore any dataset, rather than use a small number of artificial datasets (~60 published datasets were used). To achieve this, we utilized Google Colaboratory (Colab), a virtual computing environment. Colab was used as a framework to retrieve raw sequencing data (analyzed with QIIME2) and generate visualizations. Setting up the environment requires no prior experience; all students have the same drive structure and notebooks can be shared (for synchronous sessions). We also used the platform to combine multiple datasets, perform a meta-analysis, and allowed the students to analyze large datasets with 1000s of subjects and factors. Projects that required increased computational resources were integrated with Google Cloud Compute. In future, all research projects can include some aspects of reanalyzing public data, providing students with data science experience. 
Colab is also an excellent environment in which to develop data skills in multiple languages (e.g., Perl, Python, Julia).}, } @article {pmid35970834, year = {2022}, author = {Kim, M and Jiang, X and Lauter, K and Ismayilzada, E and Shams, S}, title = {Secure human action recognition by encrypted neural network inference.}, journal = {Nature communications}, volume = {13}, number = {1}, pages = {4799}, pmid = {35970834}, issn = {2041-1723}, support = {R01 AG066749/AG/NIA NIH HHS/United States ; R13 HG009072/HG/NHGRI NIH HHS/United States ; }, mesh = {*Activities of Daily Living ; Algorithms ; Cloud Computing ; *Computer Security ; Humans ; Neural Networks, Computer ; Pattern Recognition, Automated ; }, abstract = {Advanced computer vision technology can provide near real-time home monitoring to support "aging in place" by detecting falls and symptoms related to seizures and stroke. Affordable webcams, together with cloud computing services (to run machine learning algorithms), can potentially bring significant social benefits. However, this technology has not been deployed in practice because of privacy concerns. In this paper, we propose a strategy that uses homomorphic encryption to resolve this dilemma, which guarantees information confidentiality while retaining action detection. Our protocol for secure inference can distinguish falls from activities of daily living with 86.21% sensitivity and 99.14% specificity, with an average inference latency of 1.2 seconds and 2.4 seconds on real-world test datasets using small and large neural nets, respectively. We show that our method enables a 613x speedup over the latency-optimized LoLa and achieves an average of 3.1x throughput increase in secure inference compared to the throughput-optimized nGraph-HE2.}, } @article {pmid35968406, year = {2023}, author = {Gupta, YP and Mukul, and Gupta, N}, title = {Deep learning model based multimedia retrieval and its optimization in augmented reality applications.}, journal = {Multimedia tools and applications}, volume = {82}, number = {6}, pages = {8447-8466}, pmid = {35968406}, issn = {1380-7501}, abstract = {With the rise of touchless technology, the virtuality continuum has seen a spark in upcoming products. Today numerous gadgets support the use of Mixed Reality/Augmented Reality (AR)/Virtual Reality. Head-mounted displays (HMDs) such as HoloLens, Google Lens, and Jio Glass blend reality with virtuality. Other than the HMDs, many organizations tend to develop mobile AR applications to support numerous industries such as medicine, education, and construction. Currently, the major issue lies in the performance parameters of these applications when deployed as mobile applications: graphics performance, latency, and CPU functioning. Many industries pose real-time computation requirements in AR but do not implement an efficient algorithm in their frameworks. Offloading the computation of the deep learning models involved in the application to cloud servers strongly affects these processing parameters. For our use case, we use the Multi-Task Cascaded Convolutional Neural Network (MTCNN), a modern tool for face detection that uses a three-stage neural network detector. Therefore, the communication between the local application and cloud computing frameworks needs to be optimized.
The proposed framework defines how the parameters involved in the complete deployment of a mobile AR application can be optimized in terms of multimedia retrieval, its processing, and the augmentation of graphics, eventually enhancing performance. To implement the proposed algorithm, a mobile application was created in Unity3D. The mobile application virtually augments a 3D model of a skeleton on a target face. After the mentioned experimentation, it is found that the average Media Retrieval Time (1.1471 μs) and Client Time (1.1207 μs) in the local application are far lower than the average API processing time (288.934 ms). The highest latency occurs at frame rates above 80 fps.}, } @article {pmid35968403, year = {2022}, author = {Finnegan, A and Potenziani, DD and Karutu, C and Wanyana, I and Matsiko, N and Elahi, C and Mijumbi, N and Stanley, R and Vota, W}, title = {Deploying machine learning with messy, real world data in low- and middle-income countries: Developing a global health use case.}, journal = {Frontiers in big data}, volume = {5}, number = {}, pages = {553673}, pmid = {35968403}, issn = {2624-909X}, abstract = {The rapid emergence of machine learning in the form of large-scale computational statistics and accumulation of data offers global health implementing partners an opportunity to adopt, adapt, and apply these techniques and technologies to low- and middle-income country (LMIC) contexts where we work. These benefits reside just out of the reach of many implementing partners because they lack the experience and specific skills to use them. Yet the growth of available analytical systems and the exponential growth of data require the global digital health community to become conversant in this technology to continue to make contributions to help fulfill our missions. In this community case study, we describe the approach we took at IntraHealth International to inform the use case for machine learning in global health and development. We found that the data needed to take advantage of machine learning were plentiful and that an international, interdisciplinary team can be formed to collect, clean, and analyze the data at hand using cloud-based (e.g., Dropbox, Google Drive) and open source tools (e.g., R). We organized our work as a "sprint" lasting roughly 10 weeks so that we could rapidly prototype these approaches in order to achieve institutional buy-in. Our initial sprint resulted in two requests in subsequent workplans for analytics using the data we compiled and directly impacted program implementation.}, } @article {pmid35967636, year = {2022}, author = {Liu, S}, title = {Anti-monopoly supervision model of platform economy based on big data and sentiment.}, journal = {Frontiers in psychology}, volume = {13}, number = {}, pages = {953271}, pmid = {35967636}, issn = {1664-1078}, abstract = {With the advent of the cloud computing era, big data technology has also developed rapidly. Due to the huge volume, variety, fast processing speed and low value density of big data, traditional data storage, extraction, transformation and analysis technologies are not suitable, so new solutions for big data application technologies are needed. However, with the development of economic theory and the practice of market economy, some links in the industrial chain of natural monopoly industries already have a certain degree of competitiveness.
In this context, the article conducts research on the anti-monopoly supervision model of the platform economy based on big data and sentiment analysis. This paper introduces the main idea of MapReduce: the software implementation specifies a Map function that maps a set of key-value pairs into a set of intermediate key-value pairs, and a concurrent Reduce function that merges all intermediate values associated with the same key. The paper then establishes a vector space model and realizes the extraction of textual emotional elements. It introduces the theoretical controversy around antitrust regulation of the predatory pricing behavior of third-party payment platforms and conducts model experiments. The experimental results show that the throughput of 40 test users in a 1 h test is determined by two factors, QPS and the number of concurrent users, where QPS = 40/(60*60) transactions/second. The time for each test user to log in to the system is 10 min, with an average response time of 10*60 s, so the number of concurrent users = QPS * average response time = 40/(60*60)*10*60 = 6.66. This paper has successfully completed the research on the anti-monopoly supervision model of the platform economy based on big data and sentiment analysis.}, } @article {pmid35966392, year = {2022}, author = {Berisha, B and Mëziu, E and Shabani, I}, title = {Big data analytics in Cloud computing: an overview.}, journal = {Journal of cloud computing (Heidelberg, Germany)}, volume = {11}, number = {1}, pages = {24}, pmid = {35966392}, issn = {2192-113X}, abstract = {Big Data and Cloud Computing, as two mainstream technologies, are at the center of concern in the IT field. Every day a huge amount of data is produced from different sources. This data is so big in size that traditional processing tools are unable to deal with it. Besides being big, this data moves fast and has a lot of variety. Big Data is a concept that deals with storing, processing and analyzing large amounts of data. Cloud computing, on the other hand, is about offering the infrastructure to enable such processes in a cost-effective and efficient manner. Many sectors, including businesses (small or large), healthcare, education, and others, are trying to leverage the power of Big Data. In healthcare, for example, Big Data is being used to reduce costs of treatment, predict outbreaks of pandemics, and prevent diseases. This paper presents an overview of Big Data Analytics as a crucial process in many fields and sectors. We start with a brief introduction to the concept of Big Data, the amount of data that is generated on a daily basis, and the features and characteristics of Big Data. We then delve into Big Data Analytics, where we discuss issues such as the analytics cycle, analytics benefits, and the movement from the ETL to the ELT paradigm as a result of Big Data analytics in the Cloud. As a case study, we analyze Google's BigQuery, a fully managed, serverless data warehouse that enables scalable analysis over petabytes of data; as a Platform as a Service (PaaS), it supports querying using ANSI SQL.
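The Map/Reduce idea summarized in the sentiment-analysis abstract above can be illustrated with a toy, single-process word count in Python; a real MapReduce system runs many map and reduce tasks in parallel across machines.

from collections import defaultdict

def map_phase(documents):
    for doc in documents:              # Map: emit intermediate (key, value) pairs
        for word in doc.split():
            yield (word, 1)

def reduce_phase(pairs):
    grouped = defaultdict(int)         # Reduce: merge all values sharing a key
    for key, value in pairs:
        grouped[key] += value
    return dict(grouped)

counts = reduce_phase(map_phase(["big data", "big cloud", "cloud data data"]))
print(counts)   # {'big': 2, 'data': 3, 'cloud': 2}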
We use the tool to perform different experiments, such as average read, average compute, and average write, on datasets of different sizes.}, } @article {pmid35965760, year = {2022}, author = {Sadad, T and Bukhari, SAC and Munir, A and Ghani, A and El-Sherbeeny, AM and Rauf, HT}, title = {Detection of Cardiovascular Disease Based on PPG Signals Using Machine Learning with Cloud Computing.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {1672677}, pmid = {35965760}, issn = {1687-5273}, mesh = {Bayes Theorem ; *COVID-19/diagnosis ; *Cardiovascular Diseases/diagnosis ; Cloud Computing ; Humans ; Machine Learning ; Pandemics ; Photoplethysmography/methods ; }, abstract = {Hypertension, i.e., chronically elevated blood pressure (BP), causes various cardiovascular diseases (CVDs). The recent COVID-19 pandemic raised the burden on the healthcare system and also limited the resources available to these patients. The treatment of chronic patients, especially those who suffer from CVD, has fallen behind, resulting in increased deaths from CVD around the world. Regular monitoring of BP is crucial to prevent CVDs, as it can be controlled and diagnosed through constant monitoring. To find an effective and convenient procedure for the early diagnosis of CVDs, photoplethysmography (PPG) is recognized as a low-cost technology. Through PPG technology, various cardiovascular parameters, including blood pressure, heart rate, blood oxygen saturation, etc., are detected. Merging the healthcare domain with information technology (IT) is in demand to reduce the rehospitalization of CVD patients. In the proposed model, PPG signals from Internet of things (IoT)-enabled wearable patient monitoring (WPM) devices are used to monitor the heart rate (HR), etc., of the patients remotely. This article investigates various machine learning techniques such as decision tree (DT), naïve Bayes (NB), and support vector machine (SVM) and the deep learning model one-dimensional convolutional neural network-long short-term memory (1D CNN-LSTM) to develop a system that assists physicians during continuous monitoring, which achieved an accuracy of 99.5% using the PPG-BP dataset. The proposed system provides cost-effective, efficient, and fully connected monitoring systems for cardiac patients.}, } @article {pmid35963375, year = {2022}, author = {Palomeque-Mangut, S and Meléndez, F and Gómez-Suárez, J and Frutos-Puerto, S and Arroyo, P and Pinilla-Gil, E and Lozano, J}, title = {Wearable system for outdoor air quality monitoring in a WSN with cloud computing: Design, validation and deployment.}, journal = {Chemosphere}, volume = {307}, number = {Pt 3}, pages = {135948}, doi = {10.1016/j.chemosphere.2022.135948}, pmid = {35963375}, issn = {1879-1298}, mesh = {*Air Pollutants/analysis ; *Air Pollution/analysis ; Cloud Computing ; Environmental Monitoring/methods ; Humans ; Oxides ; *Wearable Electronic Devices ; }, abstract = {Breathing poor-quality air is a global threat at the same level as unhealthy diets or tobacco smoking, so the availability of affordable instruments for the measurement of air pollutant levels is highly relevant for human and environmental protection. We developed an air quality monitoring platform that comprises a wearable device embedding low-cost metal oxide semiconductor (MOS) gas sensors, a PM sensor, and a smartphone for collecting the data using Bluetooth Low Energy (BLE) communication.
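For the BigQuery case study described above, a hedged sketch of issuing an ANSI SQL aggregation from Python with the google-cloud-bigquery client library; the project, dataset, and table names are placeholders, not the article's experimental setup.

from google.cloud import bigquery

client = bigquery.Client(project="my-project")   # hypothetical GCP project
query = """
    SELECT sensor_id, AVG(reading) AS avg_reading
    FROM `my-project.demo.readings`
    GROUP BY sensor_id
"""
for row in client.query(query).result():         # runs the job, waits for rows
    print(row.sensor_id, row.avg_reading)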
Our own developed app displays information about the air surrounding the user and sends the gathered geolocalized data to a cloud, where users can map the air quality levels measured in the network. The resulting device is small, lightweight, compact, and belt-worn, with a user-friendly interface and a low cost. The data collected by the sensor array were validated in two experimental setups, first in laboratory-controlled conditions and then against referential pollutant concentrations measured by standard instruments in an outdoor environment. The performance of our air quality platform was tested in a field campaign in Barcelona with six moving devices acting as wireless sensor nodes. Devices were trained by means of machine learning algorithms to differentiate between air quality index (AQI) referential concentration values (97% success in the laboratory, 82.3% success in the field). Humidity correction was applied to all data.}, } @article {pmid35958753, year = {2022}, author = {Qi, W and Wang, H and Chen, T}, title = {Multimedia System Design and Data Storage Optimization Based on Machine Learning Algorithm.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {6426551}, pmid = {35958753}, issn = {1687-5273}, mesh = {Algorithms ; *Information Storage and Retrieval ; Machine Learning ; *Multimedia ; Reproducibility of Results ; }, abstract = {With the advancement of science and technology, digital technology and Internet of Things network technology have developed rapidly, and multimedia technology has been widely applied. Multimedia formats such as digital TV and elevator posters are shaking up traditional media. At the same time, many media operation models and multimedia technologies are combined to plan operational strategies, determine operational goals, and change the traditional media structure to achieve commercial profit and societal benefit. However, due to limitations in the existing operating model or unreasonable technical solutions, it is not easy to maximize the value of multimedia technology. An XML-based database is presented to carry the business requirements of the transaction network and its business platform, and an integrated management mechanism is analyzed and applied. The framework design includes a parallel quota processing module, an update processing module, a result processing module, and a repository and database connection management module, which together run the multiple parts of the system and complete the database. Cloud databases, whose development is based on cloud computing, can effectively fill the shortcomings and gaps of traditional database storage and processing and can provide reliable databases for storage and management services. Cloud servers use a fair weighted round-robin algorithm to achieve load balancing and use the in-memory database Redis to realize terminal data caching.
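The fair weighted round-robin load balancing mentioned above can be sketched in a few lines of Python; the server names and weights are invented, and production balancers usually interleave more smoothly.

import itertools

def weighted_round_robin(servers):
    # servers: list of (name, weight); expand each server by its weight,
    # then cycle through the expanded list forever.
    expanded = [name for name, weight in servers for _ in range(weight)]
    return itertools.cycle(expanded)

balancer = weighted_round_robin([("node-a", 3), ("node-b", 1)])
print([next(balancer) for _ in range(8)])
# node-a receives 3 of every 4 requests, node-b receives 1 of every 4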
After comprehensive testing, the system performs all functions normally, with good performance and stable operation.}, } @article {pmid35958748, year = {2022}, author = {Rahman, AU and Asif, RN and Sultan, K and Alsaif, SA and Abbas, S and Khan, MA and Mosavi, A}, title = {ECG Classification for Detecting ECG Arrhythmia Empowered with Deep Learning Approaches.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {6852845}, pmid = {35958748}, issn = {1687-5273}, mesh = {Arrhythmias, Cardiac/diagnosis ; Cloud Computing ; *Deep Learning ; Electrocardiography/methods ; Humans ; Machine Learning ; }, abstract = {According to the World Health Organization (WHO) report, heart disease is spreading throughout the world very rapidly, and the situation is becoming alarming in people aged 40 or above (Xu, 2020). Different methods and procedures are adopted to detect and diagnose heart abnormalities, and data scientists are working on finding methods with the required accuracy (Strodthoff et al., 2021). Electrocardiography (ECG) is the procedure for revealing the heart's condition in a waveform. Feature-based machine learning techniques have long played a vital role in the medical sciences, centralizing data in cloud computing with access throughout the world. Furthermore, deep learning and transfer learning widen this vision, introducing different transfer learning methods that improve accuracy and time management and detect ECG abnormalities better than earlier machine learning methods. Transfer learning has thus made research worldwide more targeted and innovative. Here, we propose a comparison and accuracy analysis of different transfer learning methods using ECG classification for detecting ECG arrhythmia (CAA-TL). The CAA-TL model performs multiclass classification of an ECG dataset taken from Kaggle. Some healthy and unhealthy data were acquired in real time, augmented, and fused with the Kaggle dataset, i.e., the Massachusetts Institute of Technology-Beth Israel Hospital (MIT-BIH) dataset. CAA-TL evaluates the accuracy of heart problem detection using different methods, namely ResNet50, AlexNet, and SqueezeNet. All three deep learning methods showed remarkable accuracy, improved over previous research. Comparing the deep learning approaches with respect to their layers widens the research and gives more clarity and accuracy, while also showing that multiclass classification on a massive ECG dataset is time-consuming. The implementation of the proposed method showed an accuracy of 98.8%, 90.08%, and 91% for AlexNet, SqueezeNet, and ResNet50, respectively.}, } @article {pmid35958385, year = {2022}, author = {Jiang, S}, title = {Hotspot Mining in the Field of Library and Information Science under the Environment of Big Data.}, journal = {Journal of environmental and public health}, volume = {2022}, number = {}, pages = {2802835}, pmid = {35958385}, issn = {1687-9813}, mesh = {*Artificial Intelligence ; *Big Data ; Data Mining/methods ; }, abstract = {Currently, with the implementation of big data strategies in countries all over the world, big data has achieved vigorous development in various fields. Big data research and application practices have also rapidly attracted the attention of the library and information field. Objective.
The study explored the current state of research and research hotspots of big data in the library and information field and further discussed future research trends. Methods. In the CNKI database, 16 CSSCI source journals in the discipline of library information and digital library were selected as data sources, and the relevant literature was retrieved with the theme of "big data." The collected literature was screened and expanded according to citation relationships. Then, with the help of Bicomb and SPSS, co-word analysis and cluster analysis were carried out on these literature results. Results. According to the findings of the data analysis, the research hotspots on the topic mainly focus on five major research themes, namely, big data and smart library, big data and intelligence research, data mining and cloud computing, big data and information analysis, and library innovation and services. Limitations. At present, the research scope and coverage on this topic are wide, which leaves the research at the macro level. Conclusions. Big data research will remain one of the hotspots in the future. However, most studies are still limited to the library and information perspective and have not yet analyzed the research status, research hotspots, and development trends in this field from the perspective of the big data knowledge structure. Moreover, machine learning, artificial intelligence, knowledge services, AR, and VR may be new directions for future attention and development.}, } @article {pmid35957481, year = {2022}, author = {Foroughimehr, N and Vilagosh, Z and Yavari, A and Wood, A}, title = {The Impact of Base Cell Size Setup on the Finite Difference Time Domain Computational Simulation of Human Cornea Exposed to Millimeter Wave Radiation at Frequencies above 30 GHz.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {15}, pages = {}, pmid = {35957481}, issn = {1424-8220}, support = {APP1042464//National Health and Medical Research Council/ ; }, mesh = {Cell Size ; Computer Simulation ; *Cornea ; Electricity ; *Electromagnetic Fields ; Humans ; }, abstract = {Mobile communication has achieved enormous technological innovation over many generations of progression. New cellular technology, including 5G cellular systems, is being deployed and makes use of higher frequencies, including the Millimetre Wave (MMW) range (30-300 GHz) of the electromagnetic spectrum. Numerical computational techniques such as the Finite Difference Time Domain (FDTD) method have been used extensively as an effective approach for assessing the biological impacts of electromagnetic fields. This study demonstrates how the accuracy of the FDTD computational simulation system varies when different meshing sizes are used, using the interaction of the critically sensitive human cornea with electromagnetic fields in the 30 to 100 GHz range. Different approaches to specifying the base cell size were compared. The accuracy of the computation is assessed by applying planar sensors showing the detail of the electric field distribution, as well as the absolute values of the electric field collected by point sensors. It was found that manually defining the base cell sizes reduces the model size as well as the computation time. However, the accuracy of the computation decreases in an unpredictable way.
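To show where the base cell size enters an FDTD computation, here is a minimal 1D free-space FDTD loop in Python; the grid length, cell size, and source are toy values (the study itself used 3D corneal models), but it illustrates how the spatial step fixes the time step through the Courant stability limit.

import numpy as np

c0 = 3e8                    # free-space wave speed, m/s
dx = 1e-3                   # base cell size: the accuracy/cost knob
dt = dx / (2.0 * c0)        # time step tied to dx by the Courant limit
mu0, eps0 = 4e-7 * np.pi, 8.854e-12

ez = np.zeros(200)          # electric field on a 200-cell 1D grid
hy = np.zeros(200)          # magnetic field, staggered half a cell

for step in range(500):
    hy[:-1] += (ez[1:] - ez[:-1]) * dt / (dx * mu0)    # curl-E update
    ez[1:] += (hy[1:] - hy[:-1]) * dt / (dx * eps0)    # curl-H update
    ez[100] += np.exp(-((step - 30.0) / 10.0) ** 2)    # soft Gaussian source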
The results indicated that using cloud computing capacity plays a crucial role in minimizing the computation time.}, } @article {pmid35957453, year = {2022}, author = {Bahache, M and Tahari, AEK and Herrera-Tapia, J and Lagraa, N and Calafate, CT and Kerrache, CA}, title = {Towards an Accurate Faults Detection Approach in Internet of Medical Things Using Advanced Machine Learning Techniques.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {15}, pages = {}, pmid = {35957453}, issn = {1424-8220}, mesh = {Humans ; Internet ; *Machine Learning ; *Wireless Technology ; }, abstract = {Remotely monitoring people's healthcare is still among the most important research topics for researchers from both industry and academia. In addition, with the emergence of Wireless Body Networks (WBANs), it has become possible to supervise patients through an implanted set of body sensors that can communicate through wireless interfaces. These body sensors are characterized by their tiny size and limited resources (power, computing, and communication capabilities), which makes these devices prone to faults and susceptible to damage. Thus, it is necessary to establish an efficient system to detect any faults or anomalies in the received sensed data. In this paper, we propose a novel, optimized hybrid solution combining machine learning and statistical techniques for detecting faults in WBANs that does not affect the devices' resources and functionality. Experimental results illustrate that our approach can detect unwanted measurement faults with a high detection accuracy ratio that exceeds 99.62% and a low mean absolute error of 0.61%, clearly outperforming existing state-of-the-art solutions.}, } @article {pmid35957452, year = {2022}, author = {Kim, M and Joo, S}, title = {Time-Constrained Adversarial Defense in IoT Edge Devices through Kernel Tensor Decomposition and Multi-DNN Scheduling.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {15}, pages = {}, pmid = {35957452}, issn = {1424-8220}, support = {NRF-2020R1G1A1012170//National Research Foundation of Korea/ ; }, abstract = {The development of deep learning technology has resulted in great contributions to many artificial intelligence services, but adversarial attack techniques against deep learning models are also becoming more diverse and sophisticated. IoT edge devices adopt cloud-independent, on-device DNN (deep neural network) processing technology to achieve fast response times. However, if the computational complexity of the denoizer for adversarial noise is high, or if a single embedded GPU is shared by multiple DNN models, adversarial defense at the on-device level inevitably incurs long latency. To solve this problem, eDenoizer is proposed in this paper. First, it applies Tucker decomposition to reduce the computation required for the convolutional kernel tensors in the denoizer. Second, eDenoizer effectively orchestrates both the denoizer and the model defended by the denoizer simultaneously. In addition, the priority of the CPU side can be projected onto the GPU, which is completely priority-agnostic, so that the delay can be minimized when the denoizer and the defense target model are assigned a high priority.
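A hedged sketch of the Tucker decomposition step described above, applied to a hypothetical convolutional kernel tensor: truncated SVDs of the two channel modes yield a small core (HOSVD-style, in plain NumPy). The kernel shape and target ranks are assumptions, and eDenoizer's exact procedure may differ.

import numpy as np

K = np.random.randn(64, 32, 3, 3)   # hypothetical kernel: (out_ch, in_ch, kH, kW)
r_out, r_in = 16, 8                 # assumed target ranks for the channel modes

# Truncated SVD of the output-channel (mode-0) unfolding.
U_out = np.linalg.svd(K.reshape(64, -1), full_matrices=False)[0][:, :r_out]
# Truncated SVD of the input-channel (mode-1) unfolding.
U_in = np.linalg.svd(K.transpose(1, 0, 2, 3).reshape(32, -1),
                     full_matrices=False)[0][:, :r_in]
# Project both channel modes to obtain the compact core tensor.
core = np.einsum('oikl,or,is->rskl', K, U_out, U_in)
print(core.shape)   # (16, 8, 3, 3): the heavy convolution now runs in a smaller space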
Extensive experiments confirm that the reduction in classification accuracy is very marginal, at most 1.78%, while the inference speed with adversarial defense is improved by up to 51.72%.}, } @article {pmid35957450, year = {2022}, author = {Liutkevičius, A and Morkevičius, N and Venčkauskas, A and Toldinas, J}, title = {Distributed Agent-Based Orchestrator Model for Fog Computing.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {15}, pages = {}, pmid = {35957450}, issn = {1424-8220}, support = {830892//European Union's Horizon 2020 research and innovation program/ ; }, mesh = {*Algorithms ; *Cloud Computing ; Delivery of Health Care ; }, abstract = {Fog computing is an extension of cloud computing that provides computing services closer to user end-devices at the network edge. One of the challenging topics in fog networks is the placement of tasks on fog nodes to obtain the best performance and resource usage. The process of mapping tasks onto resource-constrained devices is known as the service or fog application placement problem (SPP, FAPP). Highly dynamic fog infrastructures with mobile user end-devices and constantly changing fog node resources (e.g., battery life, security level) require distributed/decentralized service placement (orchestration) algorithms to ensure better resilience, scalability, and optimal real-time performance. However, recently proposed service placement algorithms rarely support user end-device mobility, constantly changing resource availability of fog nodes, and the ability to recover from fog node failures at the same time. In this article, we propose a distributed agent-based orchestrator model capable of flexible service provisioning in a dynamic fog computing environment by considering the constraints on the central processing unit (CPU), memory, battery level, and security level of fog nodes. Distributing the decision-making to multiple orchestrator fog nodes instead of relying on the mapping of a single central entity helps to spread the load and increase scalability and, most importantly, resilience. The prototype system based on the proposed orchestrator model was implemented and tested with real hardware. The results show that the proposed model is efficient in terms of response latency and computational overhead, which are minimal compared to the placement algorithm itself.
The research confirms that the proposed orchestrator approach is suitable for various fog network applications when scalability, mobility, and fault tolerance must be guaranteed.}, } @article {pmid35957307, year = {2022}, author = {Ismail, L and Buyya, R}, title = {Artificial Intelligence Applications and Self-Learning 6G Networks for Smart Cities Digital Ecosystems: Taxonomy, Challenges, and Future Directions.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {15}, pages = {}, pmid = {35957307}, issn = {1424-8220}, support = {31R215//National Water and Energy Center of the United Arab Emirates University/ ; }, mesh = {*Artificial Intelligence ; Cities ; *Ecosystem ; Technology/methods ; Wireless Technology ; }, abstract = {The recent upsurge of smart cities' applications and their building blocks in terms of the Internet of Things (IoT), Artificial Intelligence (AI), federated and distributed learning, big data analytics, blockchain, and edge-cloud computing has urged the design of the upcoming 6G network generation, due to their stringent requirements in terms of the quality of services (QoS), availability, and dependability to satisfy a Service-Level-Agreement (SLA) for the end users. Industries and academia have started to design 6G networks and propose the use of AI in its protocols and operations. Published papers on the topic discuss either the requirements of applications via a top-down approach or the network requirements in terms of agility, performance, and energy saving using a bottom-up perspective. In contrast, this paper adopts a holistic outlook, considering the applications, the middleware, the underlying technologies, and the 6G network systems towards an intelligent and integrated computing, communication, coordination, and decision-making ecosystem. In particular, we discuss the temporal evolution of the wireless network generations' development to capture the applications, middleware, and technological requirements that led to the development of the network generation systems from 1G to AI-enabled 6G and its employed self-learning models. We provide a taxonomy of the technology-enabled smart city applications' systems and present insights into those systems for the realization of a trustworthy and efficient smart city ecosystem. We propose future research directions in 6G networks for smart city applications.}, } @article {pmid35957281, year = {2022}, author = {Alwaheidi, MKS and Islam, S}, title = {Data-Driven Threat Analysis for Ensuring Security in Cloud Enabled Systems.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {15}, pages = {}, pmid = {35957281}, issn = {1424-8220}, mesh = {*Cloud Computing ; Computer Security ; *Ecosystem ; }, abstract = {Cloud computing offers many benefits including business flexibility, scalability and cost savings but despite these benefits, there exist threats that require adequate attention for secure service delivery. Threats in a cloud-based system need to be considered from a holistic perspective that accounts for data, application, infrastructure and service, which can pose potential risks. Data certainly plays a critical role within the whole ecosystem and organisations should take account of and protect data from any potential threats. Due to the variation of data types, status, and location, understanding the potential security concerns in cloud-based infrastructures is more complex than in a traditional system.
The existing threat modeling approaches lack the ability to analyse and prioritise data-related threats. The main contribution of the paper is a novel data-driven threat analysis (d-TM) approach for cloud-based systems. The main motivation of d-TM is the integration of data from three levels of abstraction, i.e., management, control, and business, and three phases, i.e., storage, process, and transmission, within each level. The d-TM provides a systematic flow of attack surface analysis from the user agent to the cloud service provider based on the threat layers in cloud computing. Finally, a cloud-based use case scenario was used to demonstrate the applicability of the proposed approach. The result shows that d-TM revealed four critical threats out of the seven threats based on the identified assets. The threats targeted management and business data in general, while targeting data in process and transit more specifically.}, } @article {pmid35945076, year = {2022}, author = {Jones, HE and Wilson, PB}, title = {Progress and opportunities through use of genomics in animal production.}, journal = {Trends in genetics : TIG}, volume = {38}, number = {12}, pages = {1228-1252}, doi = {10.1016/j.tig.2022.06.014}, pmid = {35945076}, issn = {0168-9525}, mesh = {Animals ; Humans ; *Animal Husbandry ; *Livestock/genetics ; Animal Welfare ; Genomics ; Genome/genetics ; }, abstract = {The rearing of farmed animals is a vital component of global food production systems, but its impact on the environment, human health, animal welfare, and biodiversity is being increasingly challenged. Developments in genetic and genomic technologies have had a key role in improving the productivity of farmed animals for decades. Advances in genome sequencing, annotation, and editing offer a means not only to continue that trend, but also, when combined with advanced data collection, analytics, cloud computing, appropriate infrastructure, and regulation, to take precision livestock farming (PLF) and conservation to an advanced level. Such an approach could generate substantial additional benefits in terms of reducing use of resources, health treatments, and environmental impact, while also improving animal health and welfare.}, } @article {pmid35942755, year = {2022}, author = {Chiang, TW and Chiang, DL and Chen, TS and Lin, FY and Shen, VRL and Wang, MC}, title = {Novel Lagrange interpolation polynomials for dynamic access control in a healthcare cloud system.}, journal = {Mathematical biosciences and engineering : MBE}, volume = {19}, number = {9}, pages = {9200-9219}, doi = {10.3934/mbe.2022427}, pmid = {35942755}, issn = {1551-0018}, mesh = {Algorithms ; Cloud Computing ; *Computer Security ; *Confidentiality ; Delivery of Health Care ; }, abstract = {The authority of user personal health records (PHRs) is usually determined by the owner of a cloud computing system. When a PHR file is accessed, a dynamic access control algorithm must be used to authenticate the users. The proposed dynamic access control algorithm is based on a novel Lagrange interpolation polynomial with timestamps, mainly functioning to authenticate the users with key information. Moreover, the inclusion of timestamps allows user access within an approved time slot to enhance the security of the healthcare cloud system. According to the security analysis results, this healthcare cloud system can effectively resist common attacks, including external attacks, internal attacks, collaborative attacks and equation-based attacks.
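As a toy illustration of the Lagrange-interpolation mechanism just described, the Python sketch below reconstructs a class key held as the constant term of a polynomial over a prime field from authorized share points. It is a minimal sketch under stated assumptions: the field modulus, coefficients, and share points are illustrative, and the paper's timestamp mechanism and bivariate polynomial G(x, y) are omitted.

```python
# Toy Lagrange-interpolation key recovery over a prime field (illustrative values).
P = 2**127 - 1  # a Mersenne prime used as the field modulus

def lagrange_at_zero(points, p=P):
    """Reconstruct f(0) from (x, y) share points via Lagrange interpolation mod p."""
    secret = 0
    for i, (xi, yi) in enumerate(points):
        num, den = 1, 1
        for j, (xj, _) in enumerate(points):
            if i != j:
                num = num * (-xj) % p          # numerator of the Lagrange basis at x=0
                den = den * (xi - xj) % p      # denominator of the Lagrange basis
        secret = (secret + yi * num * pow(den, -1, p)) % p
    return secret

# owner side: degree-2 polynomial f(x) = key + a1*x + a2*x^2 (mod P)
key, a1, a2 = 123456789, 987654321, 192837465
f = lambda x: (key + a1 * x + a2 * x * x) % P
shares = [(x, f(x)) for x in (1, 2, 3)]        # three authorized share points

assert lagrange_at_zero(shares) == key         # any 3 distinct shares recover the key
```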
Furthermore, the overall computational complexity of establishing and updating the polynomials is O(n*m*(log m)[2]), which is a promising result, where m denotes the degree of the polynomial G(x, y) and n denotes the number of secure users in the hierarchy.}, } @article {pmid35942754, year = {2022}, author = {Cui, D and Huang, H and Peng, Z and Li, Q and He, J and Qiu, J and Luo, X and Ou, J and Fan, C}, title = {Next-generation 5G fusion-based intelligent health-monitoring platform for ethylene cracking furnace tube.}, journal = {Mathematical biosciences and engineering : MBE}, volume = {19}, number = {9}, pages = {9168-9199}, doi = {10.3934/mbe.2022426}, pmid = {35942754}, issn = {1551-0018}, mesh = {*Artificial Intelligence ; Automation ; Ethylenes ; *Intelligence ; }, abstract = {This study aimed to develop a 5G + "mixed computing" + deep learning-based next-generation intelligent health-monitoring platform for the ethylene cracking furnace tube based on 5G communication technology, with the goal of improving the health management of the key component of ethylene production, that is, the cracking furnace tube, and focusing on the key common technical difficulties of ethylene production: tube outer-surface temperature sensing and tube slagging diagnosis. It also integrated edge-fog-cloud "mixed computing" technology and deep learning technology from artificial intelligence, achieving a higher degree of automation and intelligence and greater versatility in an industrial environment. The platform included a 5G-based tube intelligent temperature-measuring device, a 5G-based intelligent peep door gearing, a 5G-based edge-fog-cloud collaboration mechanism, and a mixed deep learning-related application. The platform enhanced the automation and intelligence of the enterprise; it could not only improve quality and efficiency but also protect the safe operation of the cracking furnace and, through its application, lead the technological progress, transformation, and upgrading of the industry.}, } @article {pmid35942147, year = {2022}, author = {Zhang, T and Han, Q and Zhang, Z}, title = {Sport Resource Classification Algorithm for Health Promotion Based on Cloud Computing: Rhythmic Gymnastics' Example.}, journal = {Journal of environmental and public health}, volume = {2022}, number = {}, pages = {2587169}, pmid = {35942147}, issn = {1687-9813}, mesh = {Algorithms ; *Cloud Computing ; *Gymnastics ; Health Promotion ; }, abstract = {In the processing of rhythmic gymnastics resources, there are inefficiency problems such as disorganized teaching resources and a lack of personalization. To improve access to teaching resource data, such as videos and documents, for health promotion, this study proposes a cloud computing-based personalized rhythmic gymnastics teaching resource classification algorithm. First, a personalized rhythmic gymnastics teaching resource database is designed based on cloud computing technology, and the teaching resources in the database are preprocessed to obtain a meta-sample set. Then, the characteristics of teaching resources are selected by the information acquisition method, and a vector space model is established to calculate the similarity of teaching resources. Finally, the distance-weighted k-NN method is used to classify the teaching resources for health promotion.
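As a rough sketch of the distance-weighted k-NN classification step just described, the Python fragment below votes among the k nearest resource vectors with weights inversely proportional to distance. The two-dimensional feature vectors and class labels are synthetic stand-ins for the vector-space-model features the abstract describes.

```python
# Distance-weighted k-NN classification sketch (synthetic data).
import numpy as np
from collections import defaultdict

def weighted_knn(query, X, labels, k=3, eps=1e-8):
    d = np.linalg.norm(X - query, axis=1)          # Euclidean distance to each sample
    nearest = np.argsort(d)[:k]                    # indices of the k nearest samples
    votes = defaultdict(float)
    for i in nearest:
        votes[labels[i]] += 1.0 / (d[i] + eps)     # closer neighbours weigh more
    return max(votes, key=votes.get)               # class with the largest weighted vote

X = np.array([[0.9, 0.1], [0.8, 0.2], [0.1, 0.9], [0.2, 0.8]])
labels = ["video", "video", "document", "document"]
print(weighted_knn(np.array([0.85, 0.15]), X, labels))  # -> "video"
```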
The experimental results show that the proposed algorithm achieves high classification accuracy, a high recall rate, and a high F-measure, verifying its effectiveness.}, } @article {pmid35937323, year = {2022}, author = {Huang, C and Li, W and Zhang, Z and Hua, X and Yang, J and Ye, J and Duan, L and Liang, X and Yang, W}, title = {An Intelligent Rice Yield Trait Evaluation System Based on Threshed Panicle Compensation.}, journal = {Frontiers in plant science}, volume = {13}, number = {}, pages = {900408}, pmid = {35937323}, issn = {1664-462X}, abstract = {High-throughput phenotyping of yield-related traits is meaningful and necessary for rice breeding and genetic study. The conventional method for rice yield-related trait evaluation faces the problems of rice threshing difficulties, measurement process complexity, and low efficiency. To solve these problems, a novel intelligent system, which includes an integrated threshing unit, grain conveyor-imaging units, a threshed panicle conveyor-imaging unit, and specialized image analysis software, has been proposed to achieve rice yield trait evaluation with high throughput and high accuracy. To improve the threshed panicle detection accuracy, the Region of Interest Align, Convolution Batch normalization activation with Leaky Relu module, Squeeze-and-Excitation unit, and optimal anchor size have been adopted to optimize the Faster-RCNN architecture, termed 'TPanicle-RCNN,' and the new model achieved an F1 score of 0.929, an increase of 0.044, and was robust to indica and japonica varieties. Additionally, AI cloud computing was adopted, which dramatically reduced the system cost and improved flexibility. To evaluate the system accuracy and efficiency, 504 panicle samples were tested, and the total spikelet measurement error decreased from 11.44 to 2.99% with threshed panicle compensation. The average measuring efficiency was approximately 40 s per sample, which was approximately twenty times more efficient than manual measurement. In this study, an automatic and intelligent system for rice yield-related trait evaluation was developed, which would provide an efficient and reliable tool for rice breeding and genetic research.}, } @article {pmid35931501, year = {2022}, author = {Kumon, RE}, title = {Teaching an advanced undergraduate acoustics laboratory without a laboratory: Course developments enabling teaching during the COVID-19 pandemic.}, journal = {The Journal of the Acoustical Society of America}, volume = {152}, number = {1}, pages = {9}, doi = {10.1121/10.0011808}, pmid = {35931501}, issn = {1520-8524}, mesh = {Acoustics ; *COVID-19/epidemiology ; Humans ; Learning ; Pandemics ; Students ; Teaching ; }, abstract = {This paper describes ongoing developments to an advanced laboratory course at Kettering University, which is targeted to students in engineering and engineering physics and emphasizes theoretical, computational, and experimental components in the context of airborne acoustics and modal testing [cf. D. A. Russell and D. O. Ludwigsen, J. Acoust. Soc. Am. 131, 2515-2524 (2012)]. These developments have included a transition to electronic laboratory notebooks and cloud-based computing resources, incorporation of updated hardware and software, and creation and testing of a multiple-choice assessment instrument for the course.
When Kettering University suddenly shifted to exclusively remote teaching in March 2020 due to the COVID-19 pandemic, many of these changes proved to be essential for enabling rapid adaptation to a situation in which a laboratory was not available for the course. Laboratory activities were rewritten by crowdsourcing archived data, videos were incorporated to illustrate dynamic phenomena, and computer simulations were used to retain student interactivity. The comparison of multiple measures, including the assessment instrument, team-based grades on project papers, and individual grades on final exams, indicates that most students were successful at learning the course material and adapting to work on team-based projects in the midst of challenging remote learning conditions.}, } @article {pmid35930042, year = {2023}, author = {Mokhtarzadeh, H and Jiang, F and Zhao, S and Malekipour, F}, title = {OpenColab project: OpenSim in Google colaboratory to explore biomechanics on the web.}, journal = {Computer methods in biomechanics and biomedical engineering}, volume = {26}, number = {9}, pages = {1055-1063}, doi = {10.1080/10255842.2022.2104607}, pmid = {35930042}, issn = {1476-8259}, mesh = {*User-Computer Interface ; Biomechanical Phenomena ; *Search Engine ; Software ; Internet ; }, abstract = {OpenSim is an open-source biomechanical package with a variety of applications. It is available to many users with bindings in MATLAB, Python, and Java via its application programming interfaces (APIs). Although the developers have described the OpenSim installation on different operating systems (Windows, Mac, and Linux) well, installation is time-consuming and complex since each operating system requires a different configuration. This project aims to demystify the development of neuro-musculoskeletal modeling in OpenSim, with zero installation configuration on any operating system (thus cross-platform) and easy model sharing, while accessing free graphical processing units (GPUs) on the web-based platform of Google Colab. To achieve this, OpenColab was developed, in which the OpenSim source code was used to build a Conda package that can be installed on Google Colab with only one block of code in less than 7 min. To use OpenColab, one requires a connection to the internet and a Gmail account. Moreover, OpenColab accesses vast libraries of machine learning methods available within free Google products, e.g., TensorFlow. Next, we performed an inverse problem in biomechanics and compared OpenColab results with the OpenSim graphical user interface (GUI) for validation. The outcomes of OpenColab and the GUI matched well (r≥0.82). OpenColab takes advantage of the zero-configuration of cloud-based platforms, accesses GPUs, and enables users to share and reproduce modeling approaches for further validation, innovative online training, and research applications. Step-by-step installation processes and examples are available at: https://simtk.org/projects/opencolab.}, } @article {pmid35928494, year = {2022}, author = {Amanat, A and Rizwan, M and Maple, C and Zikria, YB and Almadhor, AS and Kim, SW}, title = {Blockchain and cloud computing-based secure electronic healthcare records storage and sharing.}, journal = {Frontiers in public health}, volume = {10}, number = {}, pages = {938707}, pmid = {35928494}, issn = {2296-2565}, mesh = {*Blockchain ; Cloud Computing ; Delivery of Health Care ; Electronic Health Records ; Electronics ; Humans ; }, abstract = {Healthcare information is essential for both service providers and patients.
Furthermore, the secure sharing and maintenance of Electronic Healthcare Records (EHR) are imperative. EHR systems in healthcare have traditionally relied on a centralized system (e.g., the cloud) to exchange health data across healthcare stakeholders, which may expose private and sensitive patient information. EHR systems have struggled to meet the demands of several stakeholders and systems in terms of safety, isolation, and other regulatory constraints. Blockchain is a distributed, decentralized ledger technology that can provide secure, validated, and immutable data-sharing facilities. Blockchain creates a distributed ledger system using cryptographic techniques (hashes) that are consistent and permit actions to be carried out in a distributed manner without needing a centralized authority. Data exploitation is difficult and evident in a blockchain network due to its immutability. We propose an architecture based on blockchain technology that authenticates user identity using a Proof of Stake (POS) cryptography consensus mechanism and the Secure Hash Algorithm (SHA256) to secure EHR sharing among different electronic healthcare systems. An Elliptic Curve Digital Signature Algorithm (ECDSA) is used to verify EHR sensors to assemble and transmit data to cloud infrastructure. Results indicate that the proposed solution performs exceptionally well when compared with existing solutions, which include Proof-of-Work (POW), Secure Hash Algorithm (SHA-1), and Message Digest (MD5), in terms of power consumption, authenticity, and security of healthcare records.}, } @article {pmid35923220, year = {2022}, author = {Qi, L and Wu, F and Ge, Z and Sun, Y}, title = {DeepMatch: Toward Lightweight in Point Cloud Registration.}, journal = {Frontiers in neurorobotics}, volume = {16}, number = {}, pages = {891158}, pmid = {35923220}, issn = {1662-5218}, abstract = {From source to target, point cloud registration solves for a rigid body transformation that aligns the two point clouds. Iterative Closest Point (ICP) and other traditional algorithms require a long registration time and are prone to falling into local optima. Learning-based algorithms such as Deep Closest Point (DCP) perform better than traditional algorithms and escape local optimality. However, they are still not perfectly robust and rely on complex model designs because the extracted local features are susceptible to noise. In this study, we propose a lightweight point cloud registration algorithm, DeepMatch. DeepMatch extracts a feature for each point, a spatial structure composed of the point itself, the center point of the point cloud, and the point farthest from it. Because of the superiority of this per-point feature, the computing resources and time required by DeepMatch to complete training are less than one-tenth of those of other learning-based algorithms with similar performance. In addition, experiments show that our algorithm achieves state-of-the-art (SOTA) performance on clean datasets, datasets with Gaussian noise, and unseen-category datasets. Among them, on the unseen categories, compared to the previous best learning-based point cloud registration algorithms, the registration error of DeepMatch is reduced by two orders of magnitude, achieving the same performance as on the categories seen in training, which proves that DeepMatch is generalizable in point cloud registration tasks.
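The per-point feature just described is simple to sketch. The numpy fragment below pairs each point with the cloud centre and the point farthest from it; the network that consumes these features, and all training details, are beyond this illustrative sketch.

```python
# Per-point spatial feature sketch: (self, centroid, farthest point) per point.
import numpy as np

def per_point_features(points):
    """points: (N, 3) array -> (N, 3, 3) features of (self, centroid, farthest)."""
    center = points.mean(axis=0)                          # centre of the whole cloud
    # pairwise distances from every point to every other point
    d = np.linalg.norm(points[:, None, :] - points[None, :, :], axis=-1)
    farthest = points[d.argmax(axis=1)]                   # farthest point for each point
    return np.stack([points,
                     np.broadcast_to(center, points.shape),
                     farthest], axis=1)

cloud = np.random.rand(1024, 3)
feats = per_point_features(cloud)
print(feats.shape)  # (1024, 3, 3)
```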
Finally, only our DeepMatch achieves 100% recall on all three test sets.}, } @article {pmid35922695, year = {2022}, author = {Pouya, S and Aghlmand, M}, title = {Evaluation of urban green space per capita with new remote sensing and geographic information system techniques and the importance of urban green space during the COVID-19 pandemic.}, journal = {Environmental monitoring and assessment}, volume = {194}, number = {9}, pages = {633}, pmid = {35922695}, issn = {1573-2959}, mesh = {*COVID-19/epidemiology ; Cities ; Environmental Monitoring/methods ; *Geographic Information Systems ; Humans ; Pandemics ; Parks, Recreational ; Remote Sensing Technology ; Urbanization ; }, abstract = {A recently conducted study by the Centers for Disease Control and Prevention encouraged public access to urban green space during the prevalence of COVID-19, in that exposure to urban green space can positively affect physical and mental health, including reducing rates of heart disease, obesity, stress, stroke, and depression. COVID-19 has foregrounded the inadequacy of green space in populated cities. It has also highlighted existing inequities, such as unequal access to urban green space both quantitatively and qualitatively. In this regard, one of the problems related to Malatya is the uncoordinated distribution of green space in different parts of the city. Therefore, knowing the quantity and quality of these spaces in each region can play an effective role in urban planning. The aim of the present study has been to evaluate urban green space per capita and to investigate its distribution based on the population of the districts of Battalgazi county in Malatya city through an integrated methodology (remote sensing and geographic information systems). Accordingly, in Google Earth Engine, using images from the Sentinel-1 and PlanetScope satellites, different indices (NDVI, EVI, PSSR, GNDVI, and NDWI) were calculated. The data set was prepared, and then, by combining different data, classification was performed according to the support vector machine algorithm. From the maps obtained, the one with the highest accuracy was selected (overall accuracy: 94.43; kappa coefficient: 90.5). Finally, using the resulting map, the distribution of urban green space per capita and its functions in Battalgazi county and its districts were evaluated. The results of the study showed that the existing urban green spaces in Battalgazi/Malatya were not distributed evenly across the districts. The per capita urban green space in twenty-four regions is more than 9 m[2], and in twenty-three it is less than 9 m[2]. The recommendation of this study is that Türkiye's city planners and landscape designers should replan and redesign urban green spaces for quality and equal distribution, especially during and following the COVID-19 pandemic. Additionally, drawing on the Google Earth Engine cloud system, which has revolutionized GIS and remote sensing, is recommended for land use land cover modeling. It is straightforward to access information and analyze it quickly in Google Earth Engine.
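For reference, two of the indices listed above reduce to simple band arithmetic. The numpy sketch below computes NDVI and GNDVI on synthetic reflectance rasters; band names follow the generic NIR/red/green convention rather than any specific sensor's band numbering, and the values are illustrative.

```python
# Vegetation index sketch on synthetic reflectance rasters.
import numpy as np

def ndvi(nir, red):
    """Normalized Difference Vegetation Index: (NIR - red) / (NIR + red)."""
    return (nir - red) / (nir + red + 1e-9)   # small epsilon avoids division by zero

def gndvi(nir, green):
    """Green NDVI: (NIR - green) / (NIR + green)."""
    return (nir - green) / (nir + green + 1e-9)

nir = np.random.uniform(0.2, 0.6, (100, 100))
red = np.random.uniform(0.05, 0.2, (100, 100))
green = np.random.uniform(0.05, 0.25, (100, 100))
print(f"mean NDVI: {ndvi(nir, red).mean():.3f}, mean GNDVI: {gndvi(nir, green).mean():.3f}")
```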
The code published with this study makes it possible to conduct further relevant studies.}, } @article {pmid35920716, year = {2022}, author = {Petrović, D and Scott, JS and Bodnarchuk, MS and Lorthioir, O and Boyd, S and Hughes, GM and Lane, J and Wu, A and Hargreaves, D and Robinson, J and Sadowski, J}, title = {Virtual Screening in the Cloud Identifies Potent and Selective ROS1 Kinase Inhibitors.}, journal = {Journal of chemical information and modeling}, volume = {62}, number = {16}, pages = {3832-3843}, doi = {10.1021/acs.jcim.2c00644}, pmid = {35920716}, issn = {1549-960X}, mesh = {*Carcinoma, Non-Small-Cell Lung ; Cloud Computing ; Drug Evaluation, Preclinical ; Humans ; *Lung Neoplasms ; Molecular Docking Simulation ; Prospective Studies ; Protein Kinase Inhibitors/chemistry/pharmacology ; Protein-Tyrosine Kinases ; Proto-Oncogene Proteins ; Receptor Protein-Tyrosine Kinases ; }, abstract = {ROS1 rearrangements account for 1-2% of non-small cell lung cancer patients, yet there are no specifically designed, selective ROS1 therapies in the clinic. Previous knowledge of potent ROS1 inhibitors with selectivity over TrkA, a selected antitarget, enabled virtual screening as a hit-finding approach in this project. The ligand-based virtual screening was focused on identifying molecules with a similar 3D shape and pharmacophore to the known actives. To that end, we turned to the AstraZeneca virtual library, estimated to cover 10[15] synthesizable make-on-demand molecules. We used cloud computing-enabled FastROCS technology to search the enumerated 10[10] subset of the full virtual space. A small number of specific libraries were prioritized based on the compound properties and a medicinal chemistry assessment and further enumerated with available building blocks. Following the docking evaluation against the ROS1 structure, the most promising hits were synthesized and tested, resulting in the identification of several potent and selective series. The best among them gave a nanomolar ROS1 inhibitor with over 1000-fold selectivity over TrkA and, based on the preliminary established SAR, the potential for further optimization. Our prospective study describes how conceptually simple shape-matching approaches can identify potent and selective compounds by searching ultralarge virtual libraries, demonstrating the applicability of such workflows and their importance in early drug discovery.}, } @article {pmid35912308, year = {2022}, author = {Qie, D}, title = {The Relevance of Virtual-Assisted Early Childhood Education and Occupational Psychotherapy Based on Emotional Interaction.}, journal = {Occupational therapy international}, volume = {2022}, number = {}, pages = {2785987}, pmid = {35912308}, issn = {1557-0703}, mesh = {Child, Preschool ; Emotions ; Health Education ; Humans ; *Occupational Therapy ; Psychotherapy ; School Teachers ; }, abstract = {This paper presents an in-depth study and analysis of the relevance of early childhood education to occupational psychotherapy using a virtual-assisted affective interaction approach. Starting from the educational theory of interactive cognitive psychology, the theoretical basis for parent-child picture book education for interactive learning is explored, as well as the theoretical development after the introduction of AR technology. Firstly, the analysis of young children's emotions involves massive image processing, and the use of a cloud computing architecture leads to high latency, while young children's safety is a latency-sensitive service.
Secondly, face recognition accuracy based on static images is low due to problems such as the inconspicuous facial features of toddlers and low-quality kindergarten surveillance videos. In this paper, a face identity correction model based on location features is proposed, and the superiority of the model is demonstrated through experiments. Finally, this paper analyzes and mines the emotional data of young children. The level of kindergarten teachers' awareness of early childhood mental health education generally rose with their professional titles, with significant differences across the seven dimensions of early childhood mental health, the purpose and meaning of early childhood mental health education, implementers, targets, content, pathways, and effects; significant differences also existed between teachers from kindergartens of different natures across these same dimensions. Therefore, this paper proposes a face identity correction model based on position information, which considers both the correlation between pixel values in the spatial domain and the correlation between frames in the temporal domain. This paper has developed an emotion analysis system for kindergartens and put it into use in kindergartens to meet the needs of monitoring the safety of young children and evaluating early childhood education, and it has received good feedback from users, demonstrating the effectiveness of the system.}, } @article {pmid35910077, year = {2022}, author = {Lutnick, B and Manthey, D and Becker, JU and Zuckerman, JE and Rodrigues, L and Jen, KY and Sarder, P}, title = {A tool for federated training of segmentation models on whole slide images.}, journal = {Journal of pathology informatics}, volume = {13}, number = {}, pages = {100101}, pmid = {35910077}, issn = {2229-5089}, support = {U24 DK076169/DK/NIDDK NIH HHS/United States ; U24 DK115255/DK/NIDDK NIH HHS/United States ; R01 DK114485/DK/NIDDK NIH HHS/United States ; U2C DK114886/DK/NIDDK NIH HHS/United States ; U01 DK103225/DK/NIDDK NIH HHS/United States ; U54 HL145608/HL/NHLBI NIH HHS/United States ; }, abstract = {The largest bottleneck to the development of convolutional neural network (CNN) models in the computational pathology domain is the collection and curation of diverse training datasets. Training CNNs requires large cohorts of image data, and model generalizability is dependent on training data heterogeneity. Including data from multiple centers enhances the generalizability of CNN-based models, but this is hindered by the logistical challenges of sharing medical data. In this paper, we explore the feasibility of training our recently developed cloud-based segmentation tool (Histo-Cloud) using federated learning. Using a dataset of renal tissue biopsies, we show that federated training to segment interstitial fibrosis and tubular atrophy (IFTA) using datasets from three institutions does not differ from training with the data pooled on one server, when tested on a fourth (holdout) institution's data.
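The abstract does not state which aggregation rule the federated training used; a FedAvg-style average is the usual baseline and is sketched below, with each institution's weight tensors averaged in proportion to its number of local samples. Shapes and sample counts are synthetic stand-ins.

```python
# FedAvg-style aggregation sketch: sample-weighted averaging of model weights.
import numpy as np

def fed_avg(local_weights, n_samples):
    """local_weights: one list of weight arrays per institution; returns the average."""
    total = sum(n_samples)
    return [sum(w[k] * (n / total) for w, n in zip(local_weights, n_samples))
            for k in range(len(local_weights[0]))]

# three institutions holding toy two-layer models trained on different data volumes
rng = np.random.default_rng(0)
inst = [[rng.normal(size=(4, 4)), rng.normal(size=4)] for _ in range(3)]
global_model = fed_avg(inst, n_samples=[120, 300, 80])
print([w.shape for w in global_model])  # [(4, 4), (4,)]
```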
Further, training a model to segment glomeruli for a federated dataset (split by staining) demonstrates similar performance.}, } @article {pmid35909867, year = {2022}, author = {Zhang, H and Feng, Y and Wang, L}, title = {Cloud Computing to Tourism Economic Data Scheduling Algorithm under the Background of Image and Video.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {3948221}, pmid = {35909867}, issn = {1687-5273}, mesh = {Algorithms ; *Cloud Computing ; *Tourism ; }, abstract = {With the rapid development of image video and the tourism economy, tourism economic data are gradually becoming big data. Therefore, how to schedule these data has become a hot topic. This paper first summarizes the research results on image video, cloud computing, the tourism economy, and data scheduling algorithms. Secondly, the origin, structure, development, and service types of cloud computing are expounded in detail. In order to solve the problem of tourism economic data scheduling, this paper takes completion time and cross-node transmission delay as the constraints of tourism economic data scheduling. A constraint model of data scheduling is established, the fitness function is improved on the basis of an artificial immune algorithm combined with the constraint model, and directional recombination of excellent antibodies is carried out using the advantages of gene recombination so as to obtain the optimal solution to the problem more reliably. When the resource node scale is 100, the response time of EDSA is 107.92 seconds.}, } @article {pmid35909865, year = {2022}, author = {Yan, S and Shi, L and Wang, L}, title = {Influence of the Urban Built Environment on Physical and Mental Health of the Elderly under the Background of Big Data.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {4266723}, pmid = {35909865}, issn = {1687-5273}, mesh = {Aged ; *Big Data ; Built Environment ; Cities ; City Planning ; Humans ; Male ; *Mental Health ; }, abstract = {With the advent of the information technology revolution and the Internet era, information technology is gradually occupying an important position and becoming an important strategic factor in economic development. As an emerging technology that has developed continuously in recent years, big data is becoming an important industry for improving the innovation and development of the urban economy. Like AI technology, cloud computing, and the Internet, big data has become an important application technology for economic growth and economic efficiency improvement in today's world. It is an effective means of progress and development in a region and an important strategic resource. As a new technology, big data has attracted more and more attention from all walks of life, and many companies have turned their attention to developing big data for economic benefits. "Enjoy your old age" is the yearning of every elderly person and their family. In recent years, the national level has been committed to "creating an urban built environment for the elderly to achieve healthy aging." From the perspective of promoting the physical and mental health of the elderly, this paper analyzes the impact of the urban built environment on the physical and mental health of the elderly based on the needs of the elderly and puts forward countermeasures and suggestions based on the current status and existing problems of the urban built environment for the elderly.
Based on the data analysis methods and technology of big data, this paper conducted a field questionnaire survey of a total of 4,000 elderly people in urban and rural areas. It found that the existing problems of the built environment in old cities include scattered content, one-sided understanding, and rigid design. In response to these problems, the solutions of building consensus, paying attention to planning, combining urban characteristics, and joint efforts by all sectors of society are put forward. Programming tools are used to combine formulas and analyze the related data in detail. The analysis results show that the physical and mental health index of the elderly is highly correlated with factors such as changes in the degree of consensus on the urban built environment, urban built environment planning, urban built environment policy support, and multiparty efforts in the urban built environment, and these correlations are positive.}, } @article {pmid35903800, year = {2022}, author = {Mishra, N and Singh, RK and Yadav, SK}, title = {Detection of DDoS Vulnerability in Cloud Computing Using the Perplexed Bayes Classifier.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {9151847}, pmid = {35903800}, issn = {1687-5273}, mesh = {*Algorithms ; Bayes Theorem ; *Cloud Computing ; Machine Learning ; }, abstract = {Cloud computing security has been a critical issue with its increase in demand. One of the most challenging problems in cloud computing is detecting distributed denial-of-service (DDoS) attacks. The attack detection framework for DDoS attacks is tricky because of the nonlinear nature of intrusion activities, atypical system traffic behaviour, and the many features in the problem space. As a result, creating defensive solutions against these attacks is critical for mainstream cloud computing adoption. In this novel research, using performance parameters, perplexed-based classifiers with and without feature selection are compared with existing machine learning algorithms such as naïve Bayes and random forest to prove the efficacy of the perplexed-based classification algorithm. Comparing performance parameters like accuracy, sensitivity, and specificity, the proposed algorithm has an accuracy of 99%, which is higher than that of the existing algorithms, proving that the proposed algorithm is highly efficient in detecting DDoS attacks in cloud computing systems. To extend our research into the area of nature-inspired computing, we compared our perplexed Bayes classifier feature selection with nature-inspired feature selection methods like the genetic algorithm (GA) and particle swarm optimization (PSO) and found that our classifier is highly efficient in comparison with GA and PSO, whose accuracies are 2% and 8% lower, respectively, than that of the perplexed Bayes classifier.}, } @article {pmid35901084, year = {2022}, author = {Ali-Eldin, AMT}, title = {A hybrid trust computing approach for IoT using social similarity and machine learning.}, journal = {PloS one}, volume = {17}, number = {7}, pages = {e0265658}, pmid = {35901084}, issn = {1932-6203}, mesh = {Algorithms ; Humans ; Machine Learning ; *Privacy ; *Trust ; }, abstract = {Every year, millions of new devices are added to the Internet of Things, which has both great benefits and serious security risks for user data privacy.
It is the device owners' responsibility to ensure that the ownership settings of Internet of Things devices are maintained, allowing them to communicate with other user devices autonomously. The ultimate goal of the future Internet of Things is for it to be able to make decisions on its own, without the need for human intervention. Therefore, trust computing and prediction have become more vital in the processing and handling of data as well as in the delivery of services. In this paper, we compute trust in social IoT scenarios using a hybrid approach that combines a distributed computation technique and a global machine learning approach. The approach considers social similarity while assessing other users' ratings and utilizes a cloud-based architecture. Further, we propose a dynamic way to aggregate the different computed trust values. According to the results of the experimental work, the proposed approaches outperform related work. Moreover, the use of machine learning provides slightly better performance than the computing model. Both proposed approaches were found successful in degrading malicious ratings without the need for more complex algorithms.}, } @article {pmid35898787, year = {2022}, author = {Lin, K}, title = {Big Data Technology in the Macrodecision-Making Model of Regional Industrial Economic Information Applied Research.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {7400797}, pmid = {35898787}, issn = {1687-5273}, mesh = {*Artificial Intelligence ; *Big Data ; Data Mining ; Industry ; Technology ; }, abstract = {In the era of Internet+, modern industry has developed rapidly, the network economy has promoted the great development of the industrial economy, and traditional industrial economic statistics methods are no longer suited to the development needs of modern enterprises. Today's society can be described as being in the era of big data; the use of big data technology for industrial economic statistics is needed for the development of industrial modernization and is also a new requirement for industrial economic statistics put forward by social development. With the wide application of the Internet of Things, cloud computing, mobile Internet, remote sensing, and geographic information technology in the economic field, precise economic policies have gradually developed and matured. Especially for different industries in the regional economy, big data mining and analysis technology can be applied to the big data in the region to obtain the development situation and future trend of the industrial economy in a timely and effective manner. Applying big data technology to macrodecisions on regional economic information is an effective way to make macroeconomic decisions. Based on this background, this paper proposes a macroeconomic decision-making method for regional industries based on big data technology. Using data mining technology and time-series data analysis methods combined with artificial intelligence analysis, the development trend of regional industries is obtained, on the basis of which macroeconomic decisions are made. Taking agriculture as an example, an analysis of the price trend of a popular agricultural product provides an effective reference for the development strategy of that agricultural product.
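As a toy version of the time-series step just described, the sketch below smooths a synthetic monthly price series with a moving average and fits a least-squares linear trend; the paper's actual mining and AI analysis methods are not specified at this level of detail, so this is only the simplest form of such a trend analysis.

```python
# Moving-average smoothing and linear trend fit on a synthetic price series.
import numpy as np

prices = np.array([2.1, 2.3, 2.2, 2.6, 2.8, 2.7, 3.0, 3.2, 3.1, 3.4])  # monthly prices

window = 3
smoothed = np.convolve(prices, np.ones(window) / window, mode="valid")  # 3-month average

t = np.arange(len(prices))
slope, intercept = np.polyfit(t, prices, 1)   # least-squares linear trend

print(f"smoothed tail: {smoothed[-3:]}")
print(f"trend: {slope:+.3f} per month -> {'rising' if slope > 0 else 'falling'}")
```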
The results show that the method proposed in this paper can effectively apply big data technology to macrodecision-making for the regional industrial economy and has good potential for wider adoption.}, } @article {pmid35898480, year = {2022}, author = {Gao, S}, title = {Network Security Problems and Countermeasures of Hospital Information System after Going to the Cloud.}, journal = {Computational and mathematical methods in medicine}, volume = {2022}, number = {}, pages = {9725741}, pmid = {35898480}, issn = {1748-6718}, mesh = {*Cloud Computing ; *Computer Security ; Delivery of Health Care ; *Hospital Information Systems ; Hospitals ; Humans ; }, abstract = {In the current social context, information technology, network technology, and cloud computing have been widely used in all walks of life. Analysis of the specific application results of this progressive technology shows that its use has changed the working state of various industries and improved work efficiency and quality. It should be noted that although the application of some technologies brings many positive effects, the potential risks they introduce cannot be ignored. As far as hospitals are concerned, an information system using cloud computing technology can make better use of the hospital's information data, but after the information system goes to the cloud, new network security problems will appear, potentially resulting in the leakage of hospital patient information or research information. Based on this, in practice, it is necessary to analyze the network security problems that arise after a hospital information system goes to the cloud and to build and implement corresponding strategies. The author analyzes and discusses these issues through work practice and in combination with previous articles, in order to provide guidance and help for peers.}, } @article {pmid35897994, year = {2022}, author = {Wang, B and Ben, K and Lin, H and Zuo, M and Zhang, F}, title = {EP-ADTA: Edge Prediction-Based Adaptive Data Transfer Algorithm for Underwater Wireless Sensor Networks (UWSNs).}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {15}, pages = {}, pmid = {35897994}, issn = {1424-8220}, support = {52071153//National Natural Science Foundation of China/ ; }, abstract = {The underwater wireless sensor network is an important component of the underwater three-dimensional monitoring system. Due to the high bit error rate, high delay, low bandwidth, limited energy, and high dynamics of underwater networks, it is very difficult to realize efficient and reliable data transmission. Therefore, this paper posits that it is not enough to design the routing algorithm only from the perspective of the transmission environment; the comprehensive design of the data transmission algorithm should also be combined with the application. An edge prediction-based adaptive data transfer algorithm (EP-ADTA) is proposed that can dynamically adapt to the needs of underwater monitoring applications and changes in the transmission environment. EP-ADTA uses the end-edge-cloud architecture to define underwater wireless sensor networks. The algorithm uses communication nodes as agents, realizes monitoring data prediction and compression according to edge prediction, dynamically selects the transmission route, and controls the data transmission accuracy based on reinforcement learning.
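The edge-prediction idea above can be sketched with the classic dual-prediction pattern: the sensor and the edge run the same simple predictor, and a reading is transmitted only when it deviates from the prediction by more than a tolerance. This is an illustrative reduction scheme under assumed parameters, not EP-ADTA's actual predictor or its reinforcement-learning route selection.

```python
# Dual-prediction transmission-reduction sketch (naive last-value predictor).
import numpy as np

def dual_prediction_filter(readings, tol=0.05):
    """Return indices of readings that actually need to be transmitted."""
    sent, last = [], None
    for i, x in enumerate(readings):
        predicted = last                     # both sides predict "same as last sent value"
        if predicted is None or abs(x - predicted) > tol:
            sent.append(i)                   # transmit: the edge updates its copy
            last = x
        # otherwise the edge's prediction stands in for the reading
    return sent

rng = np.random.default_rng(1)
temps = 20 + 0.3 * np.sin(np.linspace(0, 6, 200)) + rng.normal(0, 0.01, 200)
sent = dual_prediction_filter(temps, tol=0.05)
print(f"transmitted {len(sent)} of {len(temps)} readings")
```

Tightening tol raises fidelity at the cost of more transmissions, which is the accuracy/energy dial the abstract describes controlling adaptively.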
The simulation results show that EP-ADTA can meet the accuracy requirements of underwater monitoring applications, dynamically adapt to changes in the transmission environment, and ensure efficient and reliable data transmission in underwater wireless sensor networks.}, } @article {pmid35891110, year = {2022}, author = {Qiu, S and Li, A}, title = {Application of Chaos Mutation Adaptive Sparrow Search Algorithm in Edge Data Compression.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {14}, pages = {}, pmid = {35891110}, issn = {1424-8220}, support = {6140002010101,6140001030111//Central Military Commission/ ; }, mesh = {Algorithms ; Cloud Computing ; *Data Compression ; Mutation ; }, abstract = {When compression technology is applied to the large amounts of data collected by an edge server, data classification accuracy is reduced and data loss is large. This paper proposes a data compression algorithm based on the chaotic mutation adaptive sparrow search algorithm (CMASSA). Constructing a new fitness function, CMASSA optimizes the hyperparameters of the Convolutional Auto-Encoder Network (CAEN) on the cloud service center, aiming to obtain the optimal CAEN model. The model is then sent to the edge server to compress the data at the lower level of edge computing. The effectiveness of CMASSA's performance is tested on ten high-dimensional benchmark functions, and the results show that CMASSA outperforms other comparison algorithms. Subsequently, the approach is compared with others from the literature on the Multi-class Weather Dataset (MWD). Experiments show that, under the premise of ensuring a certain compression ratio, the proposed algorithm not only achieves better accuracy in classification tasks than other algorithms but also maintains a high degree of data reconstruction.}, } @article {pmid35891007, year = {2022}, author = {Alatoun, K and Matrouk, K and Mohammed, MA and Nedoma, J and Martinek, R and Zmij, P}, title = {A Novel Low-Latency and Energy-Efficient Task Scheduling Framework for Internet of Medical Things in an Edge Fog Cloud System.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {14}, pages = {}, pmid = {35891007}, issn = {1424-8220}, support = {SP2022/18 and No. SP2022/34//Ministry of Education Youth and Sports/ ; CZ.02.1.01/0.0/0.0/17_049/ 0008425//European Regional Development Fund in Research Platform focused on Industry 4.0 and Robotics in Ostrava project/ ; }, mesh = {*Algorithms ; *Cloud Computing ; Computer Simulation ; Electrocardiography ; Internet ; }, abstract = {In healthcare, rapid emergency response systems necessitate real-time actions where speed and efficiency are critical; these may suffer from the latency introduced by the cloud. Therefore, fog computing is utilized in real-time healthcare applications. There are still limitations in response time, latency, and energy consumption, so a proper fog computing architecture and good task scheduling algorithms should be developed to minimize these limitations. In this study, an Energy-Efficient Internet of Medical Things to Fog Interoperability of Task Scheduling (EEIoMT) framework is proposed. This framework schedules tasks efficiently by ensuring that critical tasks are executed in the shortest possible time within their deadline while balancing energy consumption when processing other tasks. In our architecture, Electrocardiogram (ECG) sensors are used to monitor heart health at home in a smart city.
ECG sensors send the sensed data continuously to an ESP32 microcontroller through Bluetooth Low Energy (BLE) for analysis. The ESP32 is also linked to the fog scheduler via Wi-Fi to send the results of the analysis (tasks). The appropriate fog node is carefully selected to execute each task by giving every node a weight, formulated on the basis of the expected energy consumption and latency of executing the task, and choosing the node with the lowest weight. Simulations were performed in iFogSim2. The simulation outcomes show that the suggested framework has superior performance in reducing energy usage, latency, and network utilization when weighed against the CHTM, LBS, and FNPA models.}, } @article {pmid35890918, year = {2022}, author = {Khanna, A and Sah, A and Bolshev, V and Burgio, A and Panchenko, V and Jasiński, M}, title = {Blockchain-Cloud Integration: A Survey.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {14}, pages = {}, pmid = {35890918}, issn = {1424-8220}, mesh = {*Blockchain ; Cloud Computing ; Computer Security ; Data Management ; Technology ; }, abstract = {Over the last couple of years, Blockchain technology has emerged as a game-changer for various industry domains, ranging from FinTech and the supply chain to healthcare and education, thereby enabling them to meet competitive market demands and end-user requirements. Blockchain technology gained its popularity after the massive success of Bitcoin, of which it constitutes the backbone technology. While blockchain is still emerging and finding its foothold across domains, Cloud computing is comparatively well defined and established. Organizations such as Amazon, IBM, Google, and Microsoft have extensively invested in the Cloud and continue to provide a plethora of related services to a wide range of customers. The pay-per-use policy and easy access to resources are some of the biggest advantages of the Cloud, but it continues to face challenges like data security, compliance, interoperability, and data management. In this article, we present the advantages of integrating Cloud and blockchain technology along with applications of Blockchain-as-a-Service. The article presents a detailed survey illustrating recent works that combine the two technologies. The survey also discusses the blockchain-cloud services being offered by existing Cloud service providers.}, } @article {pmid35890848, year = {2022}, author = {Khalil, U and Malik, OA and Uddin, M and Chen, CL}, title = {A Comparative Analysis on Blockchain versus Centralized Authentication Architectures for IoT-Enabled Smart Devices in Smart Cities: A Comprehensive Review, Recent Advances, and Future Research Directions.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {14}, pages = {}, pmid = {35890848}, issn = {1424-8220}, support = {51808474//National Natural Science Foundation of China/ ; }, mesh = {Artificial Intelligence ; *Blockchain ; Cities ; Computer Security ; *Internet of Things ; }, abstract = {Smart devices have become an essential part of architectures such as the Internet of Things (IoT), Cyber-Physical Systems (CPSs), and the Internet of Everything (IoE). In turn, these architectures constitute a system for realizing the concept of smart cities and, ultimately, a smart planet.
The adoption of these smart devices extends to different cyber-physical systems in the smart city architecture, i.e., smart houses, smart healthcare, smart transportation, smart grid, smart agriculture, etc. The edge of the network connects these smart devices (sensors, aggregators, and actuators), which operate in the physical environment and collect data that are further used to make informed decisions through actuation. Here, the security of these devices is immensely important, specifically from an authentication standpoint, as in the case of unauthenticated/malicious assets, the whole infrastructure would be at stake. We provide an updated review of authentication mechanisms by categorizing centralized and distributed architectures. We discuss the security issues regarding the authentication of these IoT-enabled smart devices. We evaluate and analyze the schemes proposed in the literature with respect to the authentication challenges they address, in terms of computational costs, communication overheads, and the models applied to attain robustness. Hence, lightweight solutions for managing, maintaining, processing, and storing the authentication data of IoT-enabled assets are an urgent need. From an integration perspective, cloud computing has provided strong support. In contrast, decentralized ledger technology, i.e., blockchain, lightweight cryptosystems, and Artificial Intelligence (AI)-based solutions are the areas with much more to explore. Finally, we discuss the future research challenges, which will eventually help address the remaining ambiguities and drive improvement.}, } @article {pmid35890825, year = {2022}, author = {Nakazato, J and Li, Z and Maruta, K and Kubota, K and Yu, T and Tran, GK and Sakaguchi, K and Masuko, S}, title = {MEC/Cloud Orchestrator to Facilitate Private/Local Beyond 5G with MEC and Proof-of-Concept Implementation.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {14}, pages = {}, pmid = {35890825}, issn = {1424-8220}, support = {00101//National Institute of Information and Communications Technology/ ; }, mesh = {*Cloud Computing ; Communication ; *Ecosystem ; }, abstract = {The emergence of 5G-IoT opens up unprecedented connectivity possibilities for new service use cases and players. Multi-access edge computing (MEC) is a crucial technology and enabler for Beyond 5G, supporting next-generation communications with service guarantees (e.g., ultra-low latency, high security) from an end-to-end (E2E) perspective. On the other hand, one notable advance is the platform that supports virtualization from the RAN to applications. Deploying Radio Access Networks (RAN) and MEC, including third-party applications, on virtualization platforms and renting other equipment from legacy telecom operators will make it easier for new telecom operators, called Private/Local Telecom Operators, to join the ecosystem. Our preliminary studies have discussed the ecosystem for private and local telecom operators regarding business potential and revenue and provided numerical results. What remains is how Private/Local Telecom Operators can manage and deploy their MEC applications. In this paper, we designed the architecture for fully virtualized MEC 5G cellular networks with local use cases (e.g., stadiums, campuses). We propose an MEC/Cloud Orchestrator implementation for intelligent deployment selection. In addition, we provide implementation schemes for several cases held by either existing cloud owners or private and local operators.
In order to verify the proposal's feasibility, we designed the system at the E2E level and constructed a Beyond 5G testbed at the Ōokayama Campus of the Tokyo Institute of Technology. Through a proof-of-concept in the outdoor field, the proposed system's feasibility is verified by E2E performance evaluation. The verification results prove that the proposed approach can reduce latency and provide a more stable throughput than conventional cloud services.}, } @article {pmid35890793, year = {2022}, author = {Wang, Q and Jiang, L and Sun, X and Zhao, J and Deng, Z and Yang, S}, title = {An Efficient LiDAR Point Cloud Map Coding Scheme Based on Segmentation and Frame-Inserting Network.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {14}, pages = {}, pmid = {35890793}, issn = {1424-8220}, support = {No. 62001262//National Natural Science Foundation of China/ ; No. 62001263//National Natural Science Foundation of China/ ; No. ZR2020QF008//Nature Science Foundation of Shandong Province/ ; }, abstract = {In this article, we present an efficient coding scheme for LiDAR point cloud maps. As a point cloud map consists of numerous single scans spliced together, by recording the time stamp and quaternion matrix of each scan during map building, we cast the point cloud map compression into the point cloud sequence compression problem. The coding architecture includes two techniques: intra-coding and inter-coding. For intra-frames, a segmentation-based intra-prediction technique is developed. For inter-frames, an interpolation-based inter-frame coding network is explored to remove temporal redundancy by generating virtual point clouds based on the decoded frames. We only need to code the difference between the original LiDAR data and the intra/inter-predicted point cloud data. The point cloud map can be reconstructed according to the decoded point cloud sequence and quaternion matrices. Experiments on the KITTI dataset show that the proposed coding scheme can largely eliminate the temporal and spatial redundancies. The point cloud map can be encoded to 1/24 of its original size with 2 mm-level precision. Our algorithm also obtains better coding performance compared with the octree and Google Draco algorithms.}, } @article {pmid35890787, year = {2022}, author = {Hussein, M and Mohammed, YS and Galal, AI and Abd-Elrahman, E and Zorkany, M}, title = {Smart Cognitive IoT Devices Using Multi-Layer Perception Neural Network on Limited Microcontroller.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {14}, pages = {}, pmid = {35890787}, issn = {1424-8220}, mesh = {Algorithms ; *Artificial Intelligence ; Cognition ; *Internet of Things ; Neural Networks, Computer ; }, abstract = {The Internet of Things (IoT) era is mainly dependent on the word "Smart", such as smart cities, smart homes, and smart cars. This aspect can be achieved through the merging of machine learning algorithms with IoT computing models. Adding Artificial Intelligence (AI) algorithms to IoT results in the Cognitive IoT (CIoT). In the automotive industry, many researchers worked on self-diagnosis systems using deep learning, but most of them performed this process on the cloud due to the hardware limitations of the end-devices, and the devices obtained the decision via the cloud servers. Others worked with simple traditional machine learning algorithms to work around the limited processing capabilities of the end-devices.
In this paper, a self-diagnosis smart device with fast responses and little overhead is introduced, using the Multi-Layer Perceptron Neural Network (MLP-NN) as a deep learning technique. The MLP-NN learning stage is performed using the TensorFlow framework to generate the MLP model's parameters. Then, the MLP-NN model is implemented using these parameters on a low-cost end-device such as the ARM Cortex-M series architecture. After implementing the MLP-NN model, the IoT implementation is built to publish the decision results. With the proposed method, the output decision based on sensor values can be taken by the IoT node itself without returning to the cloud. For comparison, another solution is proposed for the cloud-based architecture, where the MLP-NN model is implemented on the cloud. The results demonstrate a successfully implemented MLP-NN model for low-capability end-devices, where the smart device solution has lower traffic and latency than the cloud-based solution.}, } @article {pmid35880010, year = {2022}, author = {Hemalatha, M}, title = {A hybrid random forest deep learning classifier empowered edge cloud architecture for COVID-19 and pneumonia detection.}, journal = {Expert systems with applications}, volume = {210}, number = {}, pages = {118227}, pmid = {35880010}, issn = {0957-4174}, abstract = {COVID-19 is a global pandemic that mostly affects patients' respiratory systems, and the only way to protect oneself against the virus at the present moment is to diagnose the illness, isolate the patient, and provide immunization. In the present situation, the testing used to predict COVID-19 is inefficient and results in more false positives. This difficulty can be solved by developing a remote medical decision support system that detects illness using CT scans or X-ray images with less manual interaction and is less prone to errors. The state-of-the-art techniques mainly used complex deep learning architectures which are not quite effective when deployed in resource-constrained edge devices. To overcome this problem, a multi-objective Modified Heat Transfer Search (MOMHTS) optimized hybrid Random Forest Deep learning (HRFDL) classifier is proposed in this paper. The MOMHTS algorithm mainly optimizes the deep learning model in the HRFDL architecture by optimizing the hyperparameters associated with it to support the resource-constrained edge devices. To evaluate the efficiency of this technique, extensive experimentation is conducted on two real-time datasets, namely the COVID-19 lung CT scan dataset and the Chest X-ray images (Pneumonia) dataset. The proposed methodology mainly offers increased speed of communication between the IoT devices, and the MOMHTS-optimized HRFDL classifier is modified to support resources that can only perform minimal computation and handle minimal storage. The proposed methodology offers an accuracy of 99% for both the COVID-19 lung CT scan dataset and the Chest X-ray images (Pneumonia) dataset with minimal computational time, cost, and storage.
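The Hussein et al. entry above describes training an MLP offline and running inference on the end-device itself. The following NumPy-only sketch mirrors that on-device step under stated assumptions: the layer sizes, random stand-in weights, and feature count are hypothetical, and real firmware would bake in the trained parameters rather than generate them.

```python
# NumPy-only forward pass of a small MLP, mimicking the on-device
# inference step: parameters trained offline (e.g., in TensorFlow) are
# baked into the firmware and evaluated locally. Layer sizes, the random
# stand-in weights, and the feature count are hypothetical.
import numpy as np

rng = np.random.default_rng(0)
W1, b1 = rng.normal(size=(8, 16)), np.zeros(16)  # 8 features -> 16 hidden
W2, b2 = rng.normal(size=(16, 3)), np.zeros(3)   # 16 hidden -> 3 classes

def relu(x):
    return np.maximum(x, 0.0)

def softmax(z):
    e = np.exp(z - z.max())
    return e / e.sum()

def diagnose(sensor_values):
    # The decision is taken on the IoT node itself, no cloud round trip.
    h = relu(sensor_values @ W1 + b1)
    return softmax(h @ W2 + b2)

probs = diagnose(rng.normal(size=8))
print("local decision:", int(probs.argmax()))
```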
Based on the simulation outcomes, we can conclude that the proposed methodology is an appropriate fit for edge computing detection to identify COVID-19 and pneumonia with high detection accuracy.}, } @article {pmid35879937, year = {2022}, author = {Siriborvornratanakul, T}, title = {Human behavior in image-based Road Health Inspection Systems despite the emerging AutoML.}, journal = {Journal of big data}, volume = {9}, number = {1}, pages = {96}, pmid = {35879937}, issn = {2196-1115}, abstract = {INTRODUCTION: The emergence of automated machine learning, or AutoML, has given rise to an interesting trend of no-code and low-code machine learning where most tasks in the machine learning pipeline can possibly be automated without support from human data scientists. While it sounds reasonable that we should leave repetitive trial-and-error tasks of designing complex network architectures and tuning many hyperparameters to AutoML, leading research using AutoML is still scarce. Thus, the overall purpose of this case study is to investigate the gap between current AutoML frameworks and practical machine learning development.

CASE DESCRIPTION: First, this paper confirms the increasing trend of AutoML via an indirect indicator, the number of search results in Google Trends, IEEE Xplore, and ACM Digital Library during 2012-2021. Then, the three most popular AutoML frameworks (i.e., Auto-Sklearn, AutoKeras, and Google Cloud AutoML) are inspected as AutoML's representatives; the inspection includes six comparative aspects. Based on the features available in the three AutoML frameworks investigated, our case study continues by observing recent machine learning research against the background of image-based machine learning. This is because the field of computer vision spans several levels of machine learning from basic to advanced, and it has been one of the most popular fields in studying machine learning and artificial intelligence lately. Our study is specific to the context of image-based road health inspection systems, as this area has a long history in computer vision, allowing us to observe solution transitions from past to present.
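For readers unfamiliar with the frameworks named above, here is a minimal usage sketch of one of them (Auto-Sklearn). It is illustrative rather than taken from the case study: the dataset and the time budgets are our assumptions, and the auto-sklearn package must be installed separately on a supported platform.

```python
# Minimal Auto-Sklearn usage sketch (assumes the auto-sklearn package is
# installed on a supported platform); the dataset and time budgets are
# illustrative, not drawn from the case study.
from sklearn.datasets import load_digits
from sklearn.model_selection import train_test_split
from autosklearn.classification import AutoSklearnClassifier

X, y = load_digits(return_X_y=True)
X_tr, X_te, y_tr, y_te = train_test_split(X, y, random_state=1)

automl = AutoSklearnClassifier(
    time_left_for_this_task=120,  # total search budget in seconds
    per_run_time_limit=30,        # budget per candidate pipeline
)
automl.fit(X_tr, y_tr)            # model + hyperparameter search, no hand tuning
print(automl.score(X_te, y_te))
```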

DISCUSSION AND EVALUATION: After confirming the rising numbers of AutoML search results in the three search engines, our study of the three AutoML representatives further reveals that there are many features that can be used to automate the development pipeline of image-based road health inspection systems. Nevertheless, we find that recent works in image-based road health inspection have not used any form of AutoML. Digging into these recent works, there are two main problems that best explain why most researchers do not use AutoML in their image-based road health inspection systems yet. Firstly, AutoML's trial-and-error decisions involve much extra computation compared to human-guided decisions. Secondly, using AutoML adds another layer of non-interpretability to a model. As these two problems are the major pain points in modern neural networks and deep learning, they may require years to resolve, delaying the mass adoption of AutoML in image-based road health inspection systems.

CONCLUSIONS: Although AutoML's utilization is not mainstream at this moment, we believe that the trend of AutoML will continue to grow. This is because there exists a demand for AutoML currently, and in the future, more demand for no-code or low-code machine learning development alternatives will grow together with the expansion of machine learning solutions. Nevertheless, this case study focuses on selected papers whose authors are researchers who can publish their works in academic conferences and journals. In the future, the study should continue by observing novice users, non-programmer users, and machine learning practitioners in order to discover more insights from non-research perspectives.}, } @article {pmid35875731, year = {2022}, author = {Hameed Abdulkareem, K and Awad Mutlag, A and Musa Dinar, A and Frnda, J and Abed Mohammed, M and Hasan Zayr, F and Lakhan, A and Kadry, S and Ali Khattak, H and Nedoma, J}, title = {Smart Healthcare System for Severity Prediction and Critical Tasks Management of COVID-19 Patients in IoT-Fog Computing Environments.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {5012962}, pmid = {35875731}, issn = {1687-5273}, mesh = {Algorithms ; *COVID-19 ; Delivery of Health Care ; Humans ; *Internet of Things ; }, abstract = {COVID-19 has depleted healthcare systems around the world. Extreme conditions must be defined as soon as possible so that services and treatment can be deployed and intensified. Many biomarkers are being investigated in order to track the patient's condition. Unfortunately, this may interfere with the symptoms of other diseases, making it more difficult for a specialist to diagnose or predict the severity level of the case. This research develops a Smart Healthcare System for Severity Prediction and Critical Tasks Management (SHSSP-CTM) for COVID-19 patients. On the one hand, a machine learning (ML) model is projected to predict the severity of COVID-19 disease. On the other hand, a multi-agent system is proposed to prioritize patients according to the seriousness of the COVID-19 condition and then provide complete network management from the edge to the cloud. Clinical data, including Internet of Medical Things (IoMT) sensors and Electronic Health Record (EHR) data of 78 patients from one hospital in the Wasit Governorate, Iraq, were used in this study. Different data sources are fused to generate a new feature pattern. Also, data mining techniques such as normalization and feature selection are applied. Two models, specifically logistic regression (LR) and random forest (RF), are used as baseline severity predictive models. A multi-agent algorithm (MAA), consisting of a personal agent (PA) and fog node agent (FNA), is used to control the prioritization process of COVID-19 patients. The highest prediction result is achieved based on data fusion and selected features, where all examined classifiers observe a significant increase in accuracy. Furthermore, compared with state-of-the-art methods, the RF model showed a high and balanced prediction performance with 86% accuracy, 85.7% F-score, 87.2% precision, and 86% recall. In addition, as compared to the cloud, the MAA showed very significant performance where the resource usage was 66% in the proposed model and 34% in the traditional cloud, the delay was 19% in the proposed model and 81% in the cloud, and the consumed energy was 31% in the proposed model and 69% in the cloud.
The findings of this study will allow for the early detection of the three severity cases, lowering mortality rates.}, } @article {pmid35875729, year = {2022}, author = {Zhang, L}, title = {B/S-Based Construction of a Big Data Logistics Platform.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {6873062}, pmid = {35875729}, issn = {1687-5273}, mesh = {*Big Data ; *Cloud Computing ; Humans ; }, abstract = {Due to the overwhelming growth of the Internet of Things, devices belonging to these networks are utilized in almost every domain of real life in order to improve the lifestyle of humans. However, these networks result in a huge amount of data related to different application domains, leading to another important research aspect, i.e., big data and cloud computing. Big data and cloud computing technologies in the logistics field have experienced initial contact, gradual penetration, and widespread application. Moreover, they support traditional logistics in upgrading to smart logistics, aiming to achieve the fundamental requirements of today's logistics industry and reduce costs with enhanced efficiency. However, the big data and cloud computing smart logistics model still has many problems in the construction of logistics public information platforms, end coordination development, government platform construction, and so on. In this article, in order to solve the problems of low efficiency, high cost, and low service satisfaction in traditional logistics, we have designed a new big data-enabled logistics detection system based on B/S architecture, constructed a smart logistics model consisting of a supply subsystem, demand subsystem, and supervision subsystem, and finally realized the operation process of the smart logistics model based on big data cloud computing.}, } @article {pmid35875634, year = {2022}, author = {Chen, X and Gao, T and Gao, H and Liu, B and Chen, M and Wang, B}, title = {A multi-stage heuristic method for service caching and task offloading to improve the cooperation between edge and cloud computing.}, journal = {PeerJ. Computer science}, volume = {8}, number = {}, pages = {e1012}, pmid = {35875634}, issn = {2376-5992}, abstract = {Edge-cloud computing has attracted increasing attention recently due to its efficiency in providing services for not only delay-sensitive applications but also resource-intensive requests, by combining low-latency edge resources and abundant cloud resources. A carefully designed strategy of service caching and task offloading helps to improve user satisfaction and resource efficiency. Thus, in this article, we focus on the joint service caching and task offloading problem in edge-cloud computing environments, to improve the cooperation between edge and cloud resources. First, we formulated the problem as a mixed-integer nonlinear program, which is proved to be NP-hard. Then, we proposed a three-stage heuristic method for solving the problem in polynomial time. In the first stage, our method tried to make full use of abundant cloud resources by pre-offloading as many tasks as possible to the cloud. In the second stage, our method aimed at making full use of low-latency edge resources by offloading remaining tasks and caching corresponding services on edge resources. In the last stage, our method focused on improving the performance of tasks offloaded to the cloud, by re-offloading some tasks from cloud resources to edge resources.
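A toy Python sketch of the three-stage idea in the Chen et al. entry above follows, under stated assumptions: the round-trip figure, capacities, deadlines, and field names are hypothetical, and the real method solves a mixed-integer program rather than this greedy simplification.

```python
# Toy three-stage placement: (1) pre-offload deadline-tolerant tasks to
# the cloud, (2) place the rest on the edge while caching their services,
# (3) pull cloud tasks whose services are already cached back to spare
# edge capacity. All figures and field names are hypothetical.
CLOUD_RTT = 120  # ms, illustrative round trip to the cloud

def three_stage(tasks, edge_cap, cache_cap):
    cloud = [t for t in tasks if t["deadline_ms"] >= CLOUD_RTT]      # stage 1
    edge, cached = [], set()
    for t in sorted((t for t in tasks if t not in cloud),
                    key=lambda t: t["deadline_ms"]):                 # stage 2
        if len(edge) < edge_cap and (t["service"] in cached or len(cached) < cache_cap):
            cached.add(t["service"])
            edge.append(t)
        # tasks that fit nowhere stay unassigned in this toy version
    for t in sorted(cloud, key=lambda t: t["deadline_ms"]):          # stage 3
        if len(edge) < edge_cap and t["service"] in cached:
            cloud.remove(t)
            edge.append(t)
    return edge, cloud

tasks = [{"id": i, "service": f"s{i % 2}", "deadline_ms": d}
         for i, d in enumerate([8, 15, 90, 200, 300])]
print(three_stage(tasks, edge_cap=3, cache_cap=1))
```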
The performance of our method was evaluated by extensive simulated experiments. The results show that our method has up to 155%, 56.1%, and 155% better performance in user satisfaction, resource efficiency, and processing efficiency, respectively, compared with several classical and state-of-the-art task scheduling methods.}, } @article {pmid35874097, year = {2022}, author = {Huang, CW and Chuang, WH and Lin, CY and Chen, SH}, title = {Elegancy: Digitizing the wisdom from laboratories to the cloud with free no-code platform.}, journal = {iScience}, volume = {25}, number = {8}, pages = {104710}, pmid = {35874097}, issn = {2589-0042}, abstract = {One of the top priorities in any laboratory is archiving experimental data in the most secure, efficient, and errorless way. This is especially important in chemical and biological research, where experiment records are more easily damaged. In addition, the transmission of experiment results from paper to electronic devices is time-consuming and redundant. Therefore, we introduce an open-source no-code electronic laboratory notebook, Elegancy, a cloud-based/standalone web service distributed as a Docker image. Elegancy fits all laboratories but is specially equipped with several features benefitting biochemical laboratories. It can be accessed via various web browsers, allowing researchers to upload photos or audio recordings directly from their mobile devices. Elegancy also contains a meeting arrangement module, audit/revision control, and a laboratory supply management system. We believe Elegancy could help the scientific research community gather evidence, share information, reorganize knowledge, and digitize laboratory work with greater ease and security.}, } @article {pmid35873307, year = {2022}, author = {Rodas-Martinez, AK and Altamirano-Yupanqui, JR}, title = {[Mass vaccinations against COVID-19 through the use of technologies for the management of appointment scheduling and data of large volumes of vaccinated].}, journal = {Vacunas}, volume = {23}, number = {}, pages = {S111-S120}, pmid = {35873307}, issn = {1576-9887}, abstract = {Mass vaccination poses a challenge for health authorities due to the high volume of people who need to be vaccinated in a short period of time. Manual processes in vaccination centres, where the data to record and control vaccinations are entered on paper, result in delays in the timely input of information, rendering the vaccination process inefficient. As a strategy for mass COVID-19 vaccination, the proposed prototype uses mobile technology, QR codes, and cloud computing to generate appointments, record vaccinations, control entry to vaccination centres, and automate these data-driven processes.
Technology-based processes help people by giving them the flexibility to choose the most convenient vaccination centre and provide health authorities with data-driven tools for management, control, and real-time decision-making.}, } @article {pmid35870448, year = {2022}, author = {Abe, T and Kinsella, I and Saxena, S and Buchanan, EK and Couto, J and Briggs, J and Kitt, SL and Glassman, R and Zhou, J and Paninski, L and Cunningham, JP}, title = {Neuroscience Cloud Analysis As a Service: An open-source platform for scalable, reproducible data analysis.}, journal = {Neuron}, volume = {110}, number = {17}, pages = {2771-2789.e7}, pmid = {35870448}, issn = {1097-4199}, support = {T32 NS064929/NS/NINDS NIH HHS/United States ; UF1 NS107696/NS/NINDS NIH HHS/United States ; RF1 MH120680/MH/NIMH NIH HHS/United States ; U19 NS107613/NS/NINDS NIH HHS/United States ; U19 NS104649/NS/NINDS NIH HHS/United States ; UF1 NS108213/NS/NINDS NIH HHS/United States ; U19 NS123716/NS/NINDS NIH HHS/United States ; U01 NS103489/NS/NINDS NIH HHS/United States ; }, mesh = {Cloud Computing ; *Data Analysis ; *Neurosciences ; Reproducibility of Results ; Software ; }, abstract = {A key aspect of neuroscience research is the development of powerful, general-purpose data analyses that process large datasets. Unfortunately, modern data analyses have a hidden dependence upon complex computing infrastructure (e.g., software and hardware), which acts as an unaddressed deterrent to analysis users. Although existing analyses are increasingly shared as open-source software, the infrastructure and knowledge needed to deploy these analyses efficiently still pose significant barriers to use. In this work, we develop Neuroscience Cloud Analysis As a Service (NeuroCAAS): a fully automated open-source analysis platform offering automatic infrastructure reproducibility for any data analysis. We show how NeuroCAAS supports the design of simpler, more powerful data analyses and that many popular data analysis tools offered through NeuroCAAS outperform counterparts on typical infrastructure. Pairing rigorous infrastructure management with cloud resources, NeuroCAAS dramatically accelerates the dissemination and use of new data analyses for neuroscientific discovery.}, } @article {pmid35867406, year = {2022}, author = {Merdan, O and Şişman, AS and Aksoy, SA and Kızıl, S and Tüzemen, NÜ and Yılmaz, E and Ener, B}, title = {Investigation of the Defective Growth Pattern and Multidrug Resistance in a Clinical Isolate of Candida glabrata Using Whole-Genome Sequencing and Computational Biology Applications.}, journal = {Microbiology spectrum}, volume = {10}, number = {4}, pages = {e0077622}, pmid = {35867406}, issn = {2165-0497}, mesh = {*Amphotericin B/metabolism/pharmacology ; Animals ; Antifungal Agents/pharmacology ; Artificial Intelligence ; Azoles/metabolism/pharmacology ; *Candida glabrata/genetics ; Cholesterol/metabolism/pharmacology ; Computational Biology ; Drug Resistance, Fungal/genetics ; Drug Resistance, Multiple ; Ergosterol/metabolism ; Microbial Sensitivity Tests ; Sheep ; }, abstract = {Candida glabrata is increasingly isolated from blood cultures, and multidrug-resistant isolates have important implications for therapy. This study describes a cholesterol-dependent clinical C. glabrata isolate (ML72254) that did not grow without blood (containing cholesterol) on routine mycological media and that showed azole and amphotericin B (AmB) resistance. 
Matrix-assisted laser desorption ionization-time of flight (MALDI-TOF) and whole-genome sequencing (WGS) were used for species identification. A modified Etest method (Mueller-Hinton agar supplemented with 5% sheep blood) was used for antifungal susceptibility testing. WGS data were processed via the Galaxy platform, and the genomic variations of ML72254 were retrieved. A computational biology workflow utilizing web-based applications (PROVEAN, AlphaFold Colab, and Missense3D) was constructed to predict possible deleterious effects of these missense variations on protein functions. The predictive ability of this workflow was tested with previously reported missense variations in ergosterol synthesis genes of C. glabrata. ML72254 was identified as C. glabrata sensu stricto with MALDI-TOF, and WGS confirmed this identification. The MICs of fluconazole, voriconazole, and amphotericin B were >256, >32, and >32 μg/mL, respectively. A novel frameshift mutation in the ERG1 gene (Pro314fs) and many missense variations were detected in the ergosterol synthesis genes. None of the missense variations in the ML72254 ergosterol synthesis genes were deleterious, and the Pro314fs mutation was identified as the causative molecular change for a cholesterol-dependent and multidrug-resistant phenotype. This study verified that web-based computational biology solutions can be powerful tools for examining the possible impacts of missense mutations in C. glabrata. IMPORTANCE In this study, a cholesterol-dependent C. glabrata clinical isolate that confers azole and AmB resistance was investigated using artificial intelligence (AI) technologies and cloud computing applications. This is the first known cholesterol-dependent C. glabrata isolate to be found in Turkey. Cholesterol-dependent C. glabrata isolates are rarely isolated in clinical samples; they can easily be overlooked during routine laboratory procedures. Microbiologists therefore need to be alert when discrepancies occur between microscopic examination and growth on routine media. In addition, because these isolates confer antifungal resistance, patient management requires extra care.}, } @article {pmid35866176, year = {2021}, author = {Zhou, H and Ouyang, X and Su, J and de Laat, C and Zhao, Z}, title = {Enforcing trustworthy cloud SLA with witnesses: A game theory-based model using smart contracts.}, journal = {Concurrency and computation : practice & experience}, volume = {33}, number = {14}, pages = {e5511}, doi = {10.1002/cpe.5511}, pmid = {35866176}, issn = {1532-0626}, abstract = {Trust is lacking between the cloud customer and the provider when enforcing a traditional cloud SLA (Service Level Agreement), and the blockchain technique seems a promising solution. However, current explorations still face challenges in proving that the off-chain SLO (Service Level Objective) violations really happen before being recorded in on-chain transactions. In this paper, a witness model, implemented with smart contracts, is proposed to solve this trust issue. The introduced role, "Witness", gains rewards as an incentive for reporting SLO violations, and the payoff function is carefully designed in such a way that the witness has to tell the truth to maximize the rewards. The fact that the witness has to be honest is analyzed and proved using the Nash Equilibrium principle of game theory. For ensuring the chosen witnesses are random and independent, an unbiased selection algorithm is proposed to avoid possible collusions.
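The unbiased selection the Zhou et al. entry calls for can be sketched in a few lines: seed a deterministic draw from an on-chain value so that no single party controls the outcome and any auditor can reproduce it. This is only an illustration under stated assumptions; the candidate pool, the block hash, and the draw procedure are ours, not the paper's algorithm.

```python
# Sketch of unbiased, auditable witness selection: seed the draw from an
# on-chain value (e.g., a recent block hash) so no single party controls
# it and anyone can re-derive it. Pool and hash below are illustrative.
import hashlib

def select_witnesses(candidates, block_hash_hex, k):
    pool = sorted(candidates)  # canonical order for reproducibility
    seed = int.from_bytes(
        hashlib.sha256(bytes.fromhex(block_hash_hex)).digest(), "big")
    chosen = []
    for _ in range(k):         # seeded Fisher-Yates-style draws
        seed, idx = divmod(seed, len(pool))
        chosen.append(pool.pop(idx))
    return chosen

pool = [f"0xwitness{i:02d}" for i in range(10)]
print(select_witnesses(pool, "ab" * 32, k=3))
```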
An auditing mechanism is also introduced to detect potential malicious witnesses. Specifically, we define three types of malicious behaviors and propose quantitative indicators to audit and detect these behaviors. Moreover, experimental studies based on the Ethereum blockchain demonstrate that the proposed model is feasible, and indicate that the performance, i.e., transaction fee, of each interface follows the design expectations.}, } @article {pmid35865872, year = {2022}, author = {Zhou, Y}, title = {The Application Trend of Digital Finance and Technological Innovation in the Development of Green Economy.}, journal = {Journal of environmental and public health}, volume = {2022}, number = {}, pages = {1064558}, pmid = {35865872}, issn = {1687-9813}, mesh = {*Artificial Intelligence ; Conservation of Energy Resources ; Economic Development ; *Inventions ; Sustainable Development ; }, abstract = {Based on the perspective of digital finance and technological innovation, this paper analyzes their application in economic development, green economy, and sustainable development. With the continuous development of the technological economy, methods such as artificial intelligence, the Internet of Things, big data, and cloud computing become increasingly mature. Economic development is inseparable from the empowerment of technology. In this paper, firstly, we introduce the basic concepts and main forms of digital finance and the technological economy and list the cutting-edge technologies including blockchain, VR, the sharing economy, and other modes. Then, we analyze the application trend of the technology economy. Finally, we analyze examples of digital finance and technological innovation in detail, including the tourism economy, digital marketing, the sharing economy, smart cities, digital healthcare, and personalized education, all hot topics of technology intersection and integration. In the end, we put forward prospects for the development of a digital economy, digital finance, and technological innovation.}, } @article {pmid35860795, year = {2022}, author = {Yan, L and Chen, Y and Caixia, G and Jiangying, W and Xiaoying, L and Zhe, L}, title = {Medical Big Data and Postoperative Nursing of Fracture Patients Based on Cloud Computing.}, journal = {BioMed research international}, volume = {2022}, number = {}, pages = {4090235}, pmid = {35860795}, issn = {2314-6141}, mesh = {*Big Data ; Cloud Computing ; *Fractures, Bone/surgery ; Humans ; *Postoperative Care/nursing ; Reproducibility of Results ; Wireless Technology ; }, abstract = {Based on the standards for wireless sensor system identification, the sensor node identity OID identification and the management object OID identification in the SNMP MIB are merged, and a management object OID identification coding mechanism for the SNMP-based wireless sensor system is proposed, so that the node management system can identify and manage the identity, attributes, and multiple entities of the target sensor node in the wireless sensor network through the node management object OID alone. Abnormal sources in medical big data are generally detected and verified using two models: multidimensional data and a sliding window. First, the sliding window can be used to detect abnormalities. Under this condition, the detection rate for medical big data is more than 95%, which is a very good result; across different dimensionalities, the detection rate for four-dimensional data is 2.9% higher than that for single-dimensional data.
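A minimal version of the sliding-window screening mentioned in the Yan et al. entry above can be written as follows. It is a generic sketch, not the paper's detector: the window size, the z-score threshold, and the synthetic stream of readings are all assumptions.

```python
# Minimal sliding-window anomaly detector: flag a reading that deviates
# from the recent window's mean by more than a z-score threshold. The
# window size, threshold, and synthetic stream are hypothetical.
from collections import deque
from statistics import mean, pstdev

def detect(stream, window=20, z=3.0):
    buf, anomalies = deque(maxlen=window), []
    for i, x in enumerate(stream):
        if len(buf) == window:
            mu, sigma = mean(buf), pstdev(buf) or 1e-9
            if abs(x - mu) / sigma > z:
                anomalies.append((i, x))
        buf.append(x)
    return anomalies

readings = [70 + (i % 5) for i in range(100)]
readings[60] = 140            # injected abnormal reading
print(detect(readings))       # -> [(60, 140)]
```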
On the basis of the ZigBee wireless network, terminal signal transmission for fracture treatment can be realized. Combined with the actual needs of fracture treatment, a wireless network with basic functionality can be built from ZigBee wireless modules. The nursing system was reformed on the basis of its safety and reliability, its efficiency was improved, and timely and safe nursing services were achieved.}, } @article {pmid35860647, year = {2022}, author = {Li, H}, title = {Computer Security Issues and Legal System Based on Cloud Computing.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {8112212}, pmid = {35860647}, issn = {1687-5273}, mesh = {*Cloud Computing ; *Computer Security ; Confidentiality ; Information Storage and Retrieval ; Software ; }, abstract = {To effectively improve the security and accuracy of computer information storage, a computer security problem and legal system based on cloud computing are proposed. Firstly, this article details the evolution, characteristics, architecture, and application status of cloud computing. Second, we discuss security strategies to ensure the confidentiality and integrity of cloud computing information, focusing on the data encryption technology of cloud data security, and design and implement a data backup and recovery system based on the cloud platform. The core layers of the system are the system layer and the data operation layer. The system uses multithreading technology based on epoll and a thread pool to improve the efficiency of data transmission. At the same time, a basic visual page is realized, and users can use the page to operate the system conveniently. Finally, the system is built in the laboratory environment and tested as a whole. The test results show that, in a performance comparison with currently common systems, the system in this paper improves the data transmission rate, but node CPU utilization is as high as 40%, which places certain requirements on node CPU performance. Therefore, the system meets the functional requirements proposed in the design. Compared to the existing system, its performance has been found to meet the actual requirements of use, proving that the system is accessible and efficient.}, } @article {pmid35858750, year = {2022}, author = {, }, title = {Diversifying the genomic data science research community.}, journal = {Genome research}, volume = {32}, number = {7}, pages = {1231-1241}, pmid = {35858750}, issn = {1549-5469}, support = {U24 HG010263/HG/NHGRI NIH HHS/United States ; R01 CA230514/CA/NCI NIH HHS/United States ; U54 MD007601/MD/NIMHD NIH HHS/United States ; U24 HG010262/HG/NHGRI NIH HHS/United States ; R21 DC020560/DC/NIDCD NIH HHS/United States ; R01 CA223490/CA/NCI NIH HHS/United States ; P30 CA071789/CA/NCI NIH HHS/United States ; P20 GM103466/GM/NIGMS NIH HHS/United States ; P20 GM139753/GM/NIGMS NIH HHS/United States ; }, mesh = {Humans ; *Genomics/methods ; Data Science ; United States ; Universities ; Curriculum ; }, abstract = {Over the past 20 years, the explosion of genomic data collection and the cloud computing revolution have made computational and data science research accessible to anyone with a web browser and an internet connection.
However, students at institutions with limited resources have received relatively little exposure to curricula or professional development opportunities that lead to careers in genomic data science. To broaden participation in genomics research, the scientific community needs to support these programs in local education and research at underserved institutions (UIs). These include community colleges, historically Black colleges and universities, Hispanic-serving institutions, and tribal colleges and universities that support ethnically, racially, and socioeconomically underrepresented students in the United States. We have formed the Genomic Data Science Community Network to support students, faculty, and their networks to identify opportunities and broaden access to genomic data science. These opportunities include expanding access to infrastructure and data, providing UI faculty development opportunities, strengthening collaborations among faculty, recognizing UI teaching and research excellence, fostering student awareness, developing modular and open-source resources, expanding course-based undergraduate research experiences (CUREs), building curriculum, supporting student professional development and research, and removing financial barriers through funding programs and collaborator support.}, } @article {pmid35854299, year = {2022}, author = {Wang, R and Han, J and Liu, C and Wang, L}, title = {Relationship between medical students' perceived instructor role and their approaches to using online learning technologies in a cloud-based virtual classroom.}, journal = {BMC medical education}, volume = {22}, number = {1}, pages = {560}, pmid = {35854299}, issn = {1472-6920}, mesh = {Cloud Computing ; Cross-Sectional Studies ; *Education, Distance/methods ; Humans ; *Students, Medical ; Universities ; }, abstract = {BACKGROUND: Students can take different approaches to using online learning technologies: deep and surface. It is important to understand the relationship between instructor role and student approaches to using online learning technologies in online learning settings supported by cloud computing techniques.

METHODS: A descriptive, cross-sectional study was conducted to analyze the relationships between medical students' perceptions of instructor role (instructor support, instructor-student interaction, and instructor innovation) and students' approaches to using online learning technologies in cloud-based virtual classrooms. A 25-item online questionnaire, along with a basic demographic information sheet, was administered to all medical students at the Qilu Medical School of Shandong University, China. Overall, 213 of 4000 medical students (5.34%) at the medical school participated in the survey.

RESULTS: The results showed high levels of medical students' perceived instructor support, instructor-student interaction and instructor innovation. Most students adopted the deep approaches to using online learning technologies. Instructor support, instructor-student interaction and innovation were positively related to students' deep approaches to using online learning technologies. Instructor support was negatively related to students' surface approaches to using online learning technologies.

CONCLUSIONS: The relationship between instructor role (instructor support, instructor-student interaction and instructor innovation) and students' approaches to using online learning technologies highlights the importance of instructor support and innovation in facilitating students' adoption of desirable approaches to learning from the application of technologies.}, } @article {pmid35850085, year = {2022}, author = {Peng, Y and Sengupta, D and Duan, Y and Chen, C and Tian, B}, title = {Accurate mapping of Chinese coastal aquaculture ponds using biophysical parameters based on Sentinel-2 time series images.}, journal = {Marine pollution bulletin}, volume = {181}, number = {}, pages = {113901}, doi = {10.1016/j.marpolbul.2022.113901}, pmid = {35850085}, issn = {1879-3363}, mesh = {Aquaculture/methods ; *Environmental Monitoring ; *Ponds ; Time Factors ; Water ; }, abstract = {Aquaculture plays a crucial role in global food security and nutrition supply, where China accounts for the largest market share. Although there are some studies that focus on large-scale extraction of coastal aquaculture ponds from satellite images, they often have variable accuracies and encounter misclassification due to the similar geometric characteristics of various vivid water bodies. This paper proposes an efficient and novel method that integrates the spatial characteristics and three biophysical parameters (Chlorophyll-a, Trophic State Index, and Floating Algae Index) to map coastal aquaculture ponds at a national scale. These parameters are derived from bio-optical models based on the Google Earth Engine (GEE) cloud computing platform and time series of high-resolution Sentinel-2 images. Our proposed method effectively addresses the misclassification issue between aquaculture ponds and rivers, lakes, reservoirs, and salt pans and achieves an overall accuracy of 91 % and a Kappa coefficient of 0.83 in the Chinese coastal zone. Our results indicate that the total area of Chinese coastal aquaculture ponds was 1,039,214 ha in 2019, mainly distributed in the Shandong and Guangdong provinces. The highest aquaculture intensity occurs within the 1 km coastal buffer zone, accounting for 22.4 % of the total area. Furthermore, more than half of the Chinese coastal aquaculture ponds are concentrated in the 0-5 km buffer zone. Our method is of general applicability and thus is suitable for large-scale aquaculture pond mapping projects. Moreover, the biophysical parameters we employ can be considered as new indicators for the classification of various water bodies even with different aquaculture species.}, } @article {pmid35846728, year = {2022}, author = {Yi, J and Zhang, H and Mao, J and Chen, Y and Zhong, H and Wang, Y}, title = {Review on the COVID-19 pandemic prevention and control system based on AI.}, journal = {Engineering applications of artificial intelligence}, volume = {114}, number = {}, pages = {105184}, pmid = {35846728}, issn = {0952-1976}, abstract = {As a new technology, artificial intelligence (AI) has recently received increasing attention from researchers and has been successfully applied to many domains. Currently, the outbreak of the COVID-19 pandemic has not only put people's lives in jeopardy but has also interrupted social activities and stifled economic growth. Artificial intelligence, as the most cutting-edge science field, is critical in the fight against the pandemic.
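One of the three biophysical parameters in the Peng et al. aquaculture entry above, the Floating Algae Index, can be sketched per pixel as a linear-baseline subtraction across red, NIR, and SWIR reflectance (Hu's formulation). The band choice (Sentinel-2 B4/B8/B11), the wavelengths, the demo arrays, and the threshold below are our assumptions, not the paper's calibration.

```python
# Floating Algae Index (FAI, Hu-style linear-baseline subtraction) from
# Sentinel-2 reflectance. Band choice (B4/B8/B11), wavelengths, demo
# arrays, and the threshold are assumptions for illustration only.
import numpy as np

L_RED, L_NIR, L_SWIR = 665.0, 842.0, 1610.0   # band centres (nm)

def fai(red, nir, swir):
    baseline = red + (swir - red) * (L_NIR - L_RED) / (L_SWIR - L_RED)
    return nir - baseline   # high values suggest floating algae/vegetation

red  = np.array([[0.04, 0.05], [0.04, 0.05]])
nir  = np.array([[0.06, 0.30], [0.05, 0.28]])  # bright NIR = algae-like
swir = np.array([[0.03, 0.10], [0.03, 0.09]])
print(fai(red, nir, swir) > 0.05)              # crude illustrative mask
```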
To respond scientifically to major emergencies like COVID-19, this article reviews the use of artificial intelligence in the fight against the pandemic from the perspectives of COVID-19 big data, intelligent devices and systems, and intelligent robots. This article's primary contributions are in two aspects: (1) we summarized the applications of AI in the pandemic, including virus spread prediction, patient diagnosis, vaccine development, excluding potential virus carriers, telemedicine service, economic recovery, material distribution, disinfection, and health care. (2) We summarized the challenges faced during the AI-based pandemic prevention process, including multidimensional data, sub-intelligent algorithms, and unsystematic approaches, and discussed corresponding solutions, such as 5G, cloud computing, and unsupervised learning algorithms. This article systematically surveyed the applications and challenges of AI technology during the pandemic, which is of great significance for promoting the development of AI technology and can serve as a new reference for future emergencies.}, } @article {pmid35845885, year = {2022}, author = {Yao, Y and Li, S}, title = {Design and Analysis of Intelligent Robot Based on Internet of Things Technology.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {7304180}, pmid = {35845885}, issn = {1687-5273}, mesh = {Cloud Computing ; Humans ; Intelligence ; *Internet of Things ; *Robotics ; }, abstract = {This research uses Auto-ID Labs radio frequency identification system to realize the information dissemination from the destination node to the nodes in its neighborhood. The purpose is to forward messages and explore typical applications. Realize the intelligent analysis and management of IoT devices and data. Design a set of edge video CDN system, in the G1 data set A = 9, p = 9, ℤp = 9, lℤp = 8, AES = 5, ES = 9. Distribute some hot content to public wireless hotspots closer to users in advance, A = 9, p = 7, ℤp = 9, lℤp = 9, AES = 9, ES = 8. At present, a large amount of research is mainly to deploy an edge node between the end node of the Internet of Things and the cloud computing center to provide high-quality services. By learning a stable dynamic system from human teaching to ensure the robustness of the controller to spatial disturbances. FPP-SCA plan FPP-SCA = 1.99, FPP-SCA = 1.86, FPP-SCA = 1.03, FPP-SCA = 1.18, FPP-SCA = 1.01, FPP-SCA = 1.46, FPP-SCA = 1.61. The more robots work in an unstructured environment, with different scenarios and tasks, the comparison shows that the FPP-SCA scheme is the optimal model F-S0 = 2.52, F-S5 = 2.38, F-S10 = 2.5, F-S15 = 2.09, F-S20 = 2.54, F-S25 = 2.8, F-S30 = 2.98.}, } @article {pmid35843990, year = {2022}, author = {Kölzsch, A and Davidson, SC and Gauggel, D and Hahn, C and Hirt, J and Kays, R and Lang, I and Lohr, A and Russell, B and Scharf, AK and Schneider, G and Vinciguerra, CM and Wikelski, M and Safi, K}, title = {MoveApps: a serverless no-code analysis platform for animal tracking data.}, journal = {Movement ecology}, volume = {10}, number = {1}, pages = {30}, pmid = {35843990}, issn = {2051-3933}, support = {80NSSC21K1182/NASA/NASA/United States ; }, abstract = {BACKGROUND: Bio-logging and animal tracking datasets continuously grow in volume and complexity, documenting animal behaviour and ecology in unprecedented extent and detail, but greatly increasing the challenge of extracting knowledge from the data obtained.
A large variety of analysis methods are being developed, many of which in effect are inaccessible to potential users, because they remain unpublished, depend on proprietary software or require significant coding skills.

RESULTS: We developed MoveApps, an open analysis platform for animal tracking data, to make sophisticated analytical tools accessible to a global community of movement ecologists and wildlife managers. As part of the Movebank ecosystem, MoveApps allows users to design and share workflows composed of analysis modules (Apps) that access and analyse tracking data. Users browse Apps, build workflows, customise parameters, execute analyses and access results through an intuitive web-based interface. Apps, coded in R or other programming languages, have been developed by the MoveApps team and can be contributed by anyone developing analysis code. They become available to all users of the platform. To allow long-term and cross-system reproducibility, Apps have public source code and are compiled and run in Docker containers that form the basis of a serverless cloud computing system. To support reproducible science and help contributors document and benefit from their efforts, workflows of Apps can be shared, published and archived with DOIs in the Movebank Data Repository. The platform was beta launched in spring 2021 and currently contains 49 Apps that are used by 316 registered users. We illustrate its use through two workflows that (1) provide a daily report on active tag deployments and (2) segment and map migratory movements.
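To give a feel for what a small analysis module of the kind described above does, here is an illustrative sketch of a segmentation step like the second example workflow. Real MoveApps Apps are typically R code run in Docker; this Python fragment only mirrors the idea, and the speed threshold and the sample track are invented.

```python
# Illustrative "App-like" analysis step: label a track as migrating vs.
# resting by a ground-speed threshold. (Real MoveApps Apps are typically
# R code run in Docker; this only mirrors the idea, with an invented
# threshold and sample track.)
from math import dist

def segment(track, v_thresh=5.0):
    # track: list of (t_seconds, x_m, y_m) fixes
    labels = []
    for (t0, *p0), (t1, *p1) in zip(track, track[1:]):
        speed = dist(p0, p1) / (t1 - t0)   # m/s between consecutive fixes
        labels.append("migrating" if speed > v_thresh else "resting")
    return labels

track = [(0, 0, 0), (60, 30, 0), (120, 700, 10), (180, 1400, 30)]
print(segment(track))  # -> ['resting', 'migrating', 'migrating']
```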

CONCLUSIONS: The MoveApps platform is meant to empower the community to supply, exchange and use analysis code in an intuitive environment that allows fast and traceable results and feedback. By bringing together analytical experts developing movement analysis methods and code with those in need of tools to explore, answer questions and inform decisions based on data they collect, we intend to increase the pace of knowledge generation and integration to match the huge growth rate in bio-logging data acquisition.}, } @article {pmid35829789, year = {2022}, author = {Mozaffaree Pour, N and Karasov, O and Burdun, I and Oja, T}, title = {Simulation of land use/land cover changes and urban expansion in Estonia by a hybrid ANN-CA-MCA model and utilizing spectral-textural indices.}, journal = {Environmental monitoring and assessment}, volume = {194}, number = {8}, pages = {584}, pmid = {35829789}, issn = {1573-2959}, support = {PRG352//Eesti Teadusagentuur/ ; PEATSPEC//Academy of Finland/ ; decision no 341963//Academy of Finland/ ; }, mesh = {Agriculture ; *Conservation of Natural Resources ; *Environmental Monitoring ; Estonia ; Wetlands ; }, abstract = {Over the past two decades, land use/land cover (LULC) drastically changed in Estonia. Even though the population decreased by 11%, noticeable agricultural and forest land areas were turned into urban land. In this work, we analyzed those LULC changes by mapping the spatial characteristics of LULC and urban expansion in the years 2000-2019 in Estonia. Moreover, using the revealed spatiotemporal transitions of LULC, we simulated LULC and urban expansion for 2030. Landsat 5 and 8 data were used to estimate 147 spectral-textural indices in the Google Earth Engine cloud computing platform. After that, 19 selected indices were used to model LULC changes by applying the hybrid artificial neural network, cellular automata, and Markov chain analysis (ANN-CA-MCA). While determining spectral-textural indices is quite common for LULC classifications, the utilization of these continuous indices in LULC change detection and the examination of these indices at the landscape scale are still in their infancy. This country-wide modeling approach provided the first comprehensive projection of future LULC utilizing spectral-textural indices. In this work, we utilized the hybrid ANN-CA-MCA model for predicting LULC in Estonia for 2030; we revealed that the predicted changes in LULC from 2019 to 2030 were similar to the observed changes from 2011 to 2019. The predicted change in the area of artificial surfaces was an increase of 1.33%, reaching 787.04 km[2] in total by 2030. Between 2019 and 2030, the other significant changes were the decrease of 34.57 km[2] of forest lands and the increase of agricultural lands by 14.90 km[2] and wetlands by 9.31 km[2]. These findings can inform a proper course of action for long-term spatial planning in Estonia.
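The Markov-chain component of the hybrid ANN-CA-MCA model above reduces to a simple operation: multiply the current class-area vector by a transition-probability matrix to project one period ahead. The sketch below shows that step only; the matrix and the class areas are invented for illustration, not the paper's Estonian data.

```python
# Markov-chain step of an ANN-CA-MCA-style LULC projection: project class
# areas one period ahead with a transition-probability matrix. The matrix
# and areas below are invented, not the paper's Estonian data.
import numpy as np

classes = ["artificial", "agricultural", "forest", "wetland"]
areas_2019 = np.array([700.0, 9000.0, 22000.0, 3000.0])  # km^2, hypothetical

P = np.array([              # P[i, j] = Pr(class i -> class j); rows sum to 1
    [0.980, 0.010, 0.005, 0.005],
    [0.010, 0.960, 0.020, 0.010],
    [0.004, 0.006, 0.980, 0.010],
    [0.002, 0.008, 0.010, 0.980],
])
assert np.allclose(P.sum(axis=1), 1.0)

areas_2030 = areas_2019 @ P   # one transition period ahead
for c, a in zip(classes, areas_2030):
    print(f"{c}: {a:,.0f} km^2")
```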
Therefore, a key policy priority should be to plan for the stable care of forest lands to maintain biodiversity.}, } @article {pmid35816521, year = {2023}, author = {Singh, P and Gaba, GS and Kaur, A and Hedabou, M and Gurtov, A}, title = {Dew-Cloud-Based Hierarchical Federated Learning for Intrusion Detection in IoMT.}, journal = {IEEE journal of biomedical and health informatics}, volume = {27}, number = {2}, pages = {722-731}, doi = {10.1109/JBHI.2022.3186250}, pmid = {35816521}, issn = {2168-2208}, mesh = {Humans ; Cloud Computing ; *COVID-19 ; *Internet of Things ; Internet ; Algorithms ; }, abstract = {The coronavirus pandemic has overburdened medical institutions, forcing physicians to diagnose and treat their patients remotely. Moreover, COVID-19 has made humans more conscious about their health, resulting in the extensive purchase of IoT-enabled medical devices. The rapid boom in the market worth of the internet of medical things (IoMT) has captured cyber attackers' attention. Like health, medical data is also sensitive and worth a lot on the dark web. Despite this, patients' health details have not been protected appropriately, letting trespassers exploit them. The system administrator is unable to fortify security measures due to the limited storage capacity and computation power of the resource-constrained network devices. Although various supervised and unsupervised machine learning algorithms have been developed to identify anomalies, the primary undertaking is to detect swiftly progressing malicious attacks before they deteriorate the wellness system's integrity. In this paper, a Dew-Cloud-based model is designed to enable hierarchical federated learning (HFL). The proposed Dew-Cloud model provides a higher level of data privacy with greater availability of IoMT critical application(s). The hierarchical long short-term memory (HLSTM) model is deployed at distributed Dew servers with a backend supported by cloud computing. A data pre-processing feature helps the proposed model achieve high training accuracy (99.31%) with minimal training loss (0.034). The experiment results demonstrate that the proposed HFL-HLSTM model is superior to existing schemes in terms of performance metrics such as accuracy, precision, recall, and F-score.}, } @article {pmid35808479, year = {2022}, author = {Romeo, L and Marani, R and Perri, AG and D'Orazio, T}, title = {Microsoft Azure Kinect Calibration for Three-Dimensional Dense Point Clouds and Reliable Skeletons.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {13}, pages = {}, pmid = {35808479}, issn = {1424-8220}, support = {CDS000750//Italian Ministry of Economic Development LAMPO "Leonardo Automated Manufacturing Processes for cOmposites"/ ; }, mesh = {Calibration ; *Gestures ; Humans ; *Skeleton ; }, abstract = {Nowadays, the need for reliable and low-cost multi-camera systems is increasing for many potential applications, such as localization and mapping, human activity recognition, hand and gesture analysis, and object detection and localization. However, a precise camera calibration approach is mandatory for enabling further applications that require high precision. This paper analyzes the available two-camera calibration approaches to propose a guideline for calibrating multiple Azure Kinect RGB-D sensors to achieve the best alignment of point clouds in both color and infrared resolutions, and skeletal joints returned by the Microsoft Azure Body Tracking library.
Different calibration methodologies using 2D and 3D approaches, all exploiting the functionalities within the Azure Kinect devices, are presented. Experiments demonstrate that the best results are returned by applying 3D calibration procedures, which give an average distance between all pairs of corresponding points of the point clouds of 21.426 mm in color resolution and 9.872 mm in infrared resolution for a static experiment, and of 20.868 mm and 7.429 mm, respectively, while framing a dynamic scene. At the same time, the best results in body joint alignment are achieved by three-dimensional procedures on images captured by the infrared sensors, resulting in an average error of 35.410 mm.}, } @article {pmid35808459, year = {2022}, author = {Khan, A and Umar, AI and Shirazi, SH and Ishaq, W and Shah, M and Assam, M and Mohamed, A}, title = {QoS-Aware Cost Minimization Strategy for AMI Applications in Smart Grid Using Cloud Computing.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {13}, pages = {}, pmid = {35808459}, issn = {1424-8220}, mesh = {*Cloud Computing ; Computer Simulation ; *Computer Systems ; Models, Theoretical ; Software ; }, abstract = {Cloud computing coupled with Internet of Things technology provides a wide range of cloud services such as memory, storage, computational processing, network bandwidth, and database application to the end users on demand over the Internet. More specifically, cloud computing provides efficient services such as "pay as per usage". However, utility providers in the Smart Grid are facing challenges in the design and implementation of such architecture in order to minimize the cost of underlying hardware, software, and network services. In the Smart Grid, smart meters generate a large volume of different traffic types, so efficient utilization of available resources such as buffer, storage, limited processing, and bandwidth is required in a cost-effective manner in the underlying network infrastructure. In such a context, this article introduces a QoS-aware Hybrid Queue Scheduling (HQS) model that can be used over an IoT-based network integrated with a cloud environment for different advanced metering infrastructure (AMI) application traffic classes, which have different QoS levels in the Smart Grid network. The proposed optimization model supports, classifies, and prioritizes the AMI application traffic. The main objective is to reduce the cost of buffer, processing power, and network bandwidth utilized by AMI applications in the cloud environment. For this, we developed a simulation model in the CloudSim simulator that uses a simple mathematical model in order to achieve the objective function. During the simulations, the effects of various numbers of cloudlets on the cost of virtual machine resources such as RAM, CPU processing, and available bandwidth were investigated in cloud computing. The obtained simulation results show that our proposed model successfully competes with the previous schemes in terms of minimizing the processing, memory, and bandwidth cost by a significant margin.
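A priority-scheduling sketch in the spirit of the queue-based AMI traffic management described in the Khan et al. entry above follows. The class names, their priorities, and the sample messages are hypothetical, not the paper's HQS model.

```python
# Priority-scheduling sketch for AMI traffic classes: alarms drain before
# demand-response, which drains before routine meter reads. Class names
# and priorities are hypothetical, not the paper's HQS model.
import heapq
from itertools import count

PRIORITY = {"outage_alarm": 0, "demand_response": 1, "meter_read": 2}

def schedule(messages):
    seq, heap = count(), []
    for m in messages:
        # the counter preserves FIFO order within a priority class
        heapq.heappush(heap, (PRIORITY[m["kind"]], next(seq), m))
    while heap:
        _, _, m = heapq.heappop(heap)
        yield m["kind"]

msgs = [{"kind": "meter_read"}, {"kind": "outage_alarm"},
        {"kind": "meter_read"}, {"kind": "demand_response"}]
print(list(schedule(msgs)))
# -> ['outage_alarm', 'demand_response', 'meter_read', 'meter_read']
```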
Moreover, the simulation results confirmed that the proposed optimization model behaves as expected and is realistic for AMI application traffic in the Smart Grid network using cloud computing.}, } @article {pmid35808452, year = {2022}, author = {Shen, X and Chang, Z and Niu, S}, title = {Mobile Edge Computing Task Offloading Strategy Based on Parking Cooperation in the Internet of Vehicles.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {13}, pages = {}, pmid = {35808452}, issn = {1424-8220}, support = {61961010//National Natural Science Foundation of China/ ; No.AA19046004//Guangxi Science and technology major special projects/ ; YCSW2022314//Innovation Project of Guangxi Graduate Education/ ; }, mesh = {*Cloud Computing ; Computer Simulation ; *Internet ; }, abstract = {Due to their limited computing capacity, onboard devices can no longer meet the large number of computing requirements. Therefore, mobile edge computing (MEC) provides more computing and storage capabilities for vehicles. Inspired by the large number of roadside parked vehicles, this paper takes roadside parked vehicles with idle computing resources as the task offloading platform and proposes a mobile edge computing task offloading strategy based on roadside parking cooperation. Resource sharing and mutual utilization among roadside vehicles, roadside units (RSUs), and cloud servers were established, and the collaborative offloading problem of computing tasks was transformed into a constraint problem. A hybrid genetic algorithm (HHGA) with a hill-climbing operator was used to solve the multi-constraint problem, to reduce the delay and energy consumption of computing tasks. The simulation results show that when the number of tasks is 25, the delay and energy consumption of the HHGA algorithm are improved by 24.1% and 11.9%, respectively, compared with Tradition. When the task size is 1.0 MB, the HHGA algorithm reduces the system overhead by 7.9% compared with Tradition. Therefore, the proposed scheme can effectively reduce the total system cost during task offloading.}, } @article {pmid35808373, year = {2022}, author = {Loukatos, D and Lygkoura, KA and Maraveas, C and Arvanitis, KG}, title = {Enriching IoT Modules with Edge AI Functionality to Detect Water Misuse Events in a Decentralized Manner.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {13}, pages = {}, pmid = {35808373}, issn = {1424-8220}, mesh = {Agriculture ; *Artificial Intelligence ; Humans ; *Internet of Things ; Machine Learning ; Water ; }, abstract = {The digital transformation of agriculture is a promising necessity for tackling the increasing nutritional needs of the population on Earth and the degradation of natural resources. Focusing on the "hot" area of natural resource preservation, the recent appearance of more efficient and cheaper microcontrollers, the advances in low-power and long-range radios, and the availability of accompanying software tools are exploited in order to monitor water consumption and to detect and report misuse events, with reduced power and network bandwidth requirements. Quite often, large quantities of water are wasted for a variety of reasons, from broken irrigation pipes to people's negligence. To tackle this problem, the necessary design and implementation details are highlighted for an experimental water usage reporting system that exhibits Edge Artificial Intelligence (Edge AI) functionality.
By combining modern technologies, such as the Internet of Things (IoT), Edge Computing (EC) and Machine Learning (ML), the deployment of a compact automated detection mechanism can be easier than before, while the information that has to travel from the edges of the network to the cloud, and thus the corresponding energy footprint, are drastically reduced. In parallel, characteristic implementation challenges are discussed, and a first set of corresponding evaluation results is presented.}, } @article {pmid35808368, year = {2022}, author = {Sefati, SS and Halunga, S}, title = {A Hybrid Service Selection and Composition for Cloud Computing Using the Adaptive Penalty Function in Genetic and Artificial Bee Colony Algorithm.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {13}, pages = {}, pmid = {35808368}, issn = {1424-8220}, support = {861219//Mobility and Training foR beyond 5G ecosystems (MOTOR5G)'. The project has received funding from the European Union's Horizon 2020 programme under the Marie Skłodowska Curie Actions (MSCA)/ ; }, mesh = {*Algorithms ; *Cloud Computing ; Reproducibility of Results ; }, abstract = {The rapid development of Cloud Computing (CC) has led to the release of many services in the cloud environment. QoS-aware service composition is a significant challenge in CC. A single service in the cloud environment cannot respond to the complex requests and diverse requirements of the real world. In some cases, one service cannot fulfill the user's needs, so it is necessary to combine different services to meet these requirements. Many available services offer comparable QoS, and selecting or composing such combined services is an NP-hard optimization problem. One of the significant challenges in CC is integrating existing services to meet the intricate necessities of different types of users. Due to the NP-hard complexity of service composition, many metaheuristic algorithms have been used so far. This article presents the Artificial Bee Colony and Genetic Algorithm (ABCGA) as a metaheuristic algorithm to achieve the desired goals. If the fitness function of the services selected by the Genetic Algorithm (GA) is suitable, a set of services is further passed to the Artificial Bee Colony (ABC) algorithm, which chooses the appropriate service according to each user's needs. The proposed solution is evaluated through experiments using CloudSim simulation, and the numerical results prove the efficiency of the proposed method with respect to reliability, availability, and cost.}, } @article {pmid35808345, year = {2022}, author = {Shahzad, A and Gherbi, A and Zhang, K}, title = {Enabling Fog-Blockchain Computing for Autonomous-Vehicle-Parking System: A Solution to Reinforce IoT-Cloud Platform for Future Smart Parking.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {13}, pages = {}, pmid = {35808345}, issn = {1424-8220}, mesh = {*Blockchain ; Cloud Computing ; Computer Security ; Humans ; Privacy ; }, abstract = {With the advent of modern technologies, including the IoT and blockchain, smart-parking (SP) systems are becoming smarter and smarter. Like other automated systems, and particularly those that require automation or minimal interaction with humans, an SP system must deliver performance in terms of latency, throughput, efficiency, privacy, and security, and it is considered a long-term cost-effective solution.
This study looks ahead to future trends and developments in SP systems and presents an inclusive, long-term, effective, and well-performing smart autonomous vehicle parking (SAVP) system that explores and employs the emerging fog-computing and blockchain technologies as robust solutions to strengthen the existing collaborative IoT-cloud platform to build and manage SP systems for autonomous vehicles (AVs). In other words, the proposed SAVP system offers a smart-parking solution, both indoors and outdoors, mainly for AVs looking for vacant parking, wherein the fog nodes act as a middleware layer that provides various parking operations closer to IoT-enabled edge devices. To address the challenges of privacy and security, a lightweight integrated blockchain and cryptography (LIBC) module is deployed, which is functional at each fog node, to authorize and grant access to the AVs in every phase of parking (e.g., from the parking entrance to the parking slot to the parking exit). A proof-of-concept implementation was conducted, wherein the overall computed results, such as the average response time, efficiency, privacy, and security, showed the system to be highly efficient and demonstrated a viable SAVP system. The study also set an innovative pace, with careful consideration given to combatting the existing SP-system challenges and, therefore, to building and managing future scalable SP systems.}, } @article {pmid35808322, year = {2022}, author = {Katayama, Y and Tachibana, T}, title = {Optimal Task Allocation Algorithm Based on Queueing Theory for Future Internet Application in Mobile Edge Computing Platform.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {13}, pages = {}, pmid = {35808322}, issn = {1424-8220}, support = {Platform Technology of Wired/Wireless Access Network Corresponding to Various Services of 5G/Beyond 5G//National Institute of Information and Communications Technology/ ; }, mesh = {*Algorithms ; *Cloud Computing ; *Computer Heuristics ; Forecasting ; Internet/trends ; }, abstract = {For 5G and the future Internet, in this paper, we propose a task allocation method for future Internet applications to reduce the total latency in a mobile edge computing (MEC) platform with three types of servers: a dedicated MEC server, a shared MEC server, and a cloud server. For this platform, we first calculate the delay between sending a task and receiving a response for the dedicated MEC server, shared MEC server, and cloud server by considering the processing time and transmission delay. Here, the transmission delay for the shared MEC server is derived using queueing theory. Then, we formulate an optimization problem for task allocation to minimize the total latency for all tasks. By solving this optimization problem, tasks can be allocated appropriately to the MEC servers and the cloud server. In addition, we propose a heuristic algorithm to obtain an approximate optimal solution in a shorter time. This heuristic algorithm consists of four algorithms: a main algorithm and three additional algorithms. In this algorithm, tasks are divided into two groups, and task allocation is executed for each group. We compare the performance of our proposed heuristic algorithm with the solutions obtained by three other methods and investigate the effectiveness of our algorithm. Numerical examples are used to demonstrate the effectiveness of our proposed heuristic algorithm.
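The queueing step in the entry above can be illustrated with the textbook M/M/1 mean sojourn time, W = 1/(mu - lambda); the sketch below allocates a task to whichever server minimizes an assumed transmission-plus-service delay and is a toy model, not the paper's formulation:

    # Toy latency-driven allocation across three server types (assumed figures).
    def mm1_sojourn(arrival_rate, service_rate):
        """Mean time in an M/M/1 system: W = 1 / (mu - lambda), requires mu > lambda."""
        assert service_rate > arrival_rate, "queue would be unstable"
        return 1.0 / (service_rate - arrival_rate)

    def total_latency(transmission_delay, processing_delay):
        return transmission_delay + processing_delay

    dedicated = total_latency(0.005, 0.020)              # no queueing, in seconds
    shared = total_latency(0.005, mm1_sojourn(40, 60))   # shared server is queued
    cloud = total_latency(0.080, 0.004)                  # far away but fast

    best = min([("dedicated MEC", dedicated), ("shared MEC", shared),
                ("cloud", cloud)], key=lambda kv: kv[1])
    print(f"allocate task to {best[0]} ({best[1] * 1000:.1f} ms)")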
The results show that our proposed heuristic algorithm can perform task allocation quickly and can effectively reduce the total latency. We conclude that our proposed heuristic algorithm is effective for task allocation in a MEC platform with multiple types of MEC servers.}, } @article {pmid35808234, year = {2022}, author = {Chen, X and Liu, G}, title = {Federated Deep Reinforcement Learning-Based Task Offloading and Resource Allocation for Smart Cities in a Mobile Edge Network.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {13}, pages = {}, pmid = {35808234}, issn = {1424-8220}, support = {NO.2018ZDCXL-GY-04-03-02//Shaanxi Key R\&D Program/ ; }, abstract = {Mobile edge computing (MEC) has become an indispensable part of the era of intelligent manufacturing and Industry 4.0. In the smart city, computation-intensive tasks can be offloaded to the MEC server or the central cloud server for execution. However, privacy disclosure issues may arise when raw data is migrated to other MEC servers or the central cloud server. Since federated learning protects privacy and improves training performance, it is introduced to solve this issue. In this article, we formulate the joint optimization problem of task offloading and resource allocation to minimize the energy consumption of all Internet of Things (IoT) devices subject to delay thresholds and limited resources. A two-timescale federated deep reinforcement learning algorithm based on the Deep Deterministic Policy Gradient (DDPG) framework (FL-DDPG) is proposed. Simulation results show that the proposed algorithm can greatly reduce the energy consumption of all IoT devices.}, } @article {pmid35808224, year = {2022}, author = {Khanh, TT and Hai, TH and Hossain, MD and Huh, EN}, title = {Fuzzy-Assisted Mobile Edge Orchestrator and SARSA Learning for Flexible Offloading in Heterogeneous IoT Environment.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {13}, pages = {}, pmid = {35808224}, issn = {1424-8220}, mesh = {Algorithms ; Fuzzy Logic ; *Internet of Things ; Learning ; Reward ; }, abstract = {In the era of heterogeneous 5G networks, Internet of Things (IoT) devices have significantly altered our daily life by providing innovative applications and services. However, these devices process large amounts of data traffic, and their applications require extremely fast response times and massive computational resources, leading to a high failure rate for task offloading and considerable latency due to congestion. To improve the quality of service (QoS) and performance under the dynamic flow of requests from devices, numerous task offloading strategies in the area of multi-access edge computing (MEC) have been proposed in previous studies. Nevertheless, neighboring edge servers with excess computational resources have not been considered, leading to unbalanced loads among edge servers in the same network tier. Therefore, in this paper, we propose a collaboration algorithm between a fuzzy-logic-based mobile edge orchestrator (MEO) and state-action-reward-state-action (SARSA) reinforcement learning, which we call the Fu-SARSA algorithm. We aim to minimize the failure rate and service time of tasks and decide on the optimal resource allocation for offloading, such as a local edge server, cloud server, or the best neighboring edge server in the MEC network.
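The SARSA update at the heart of such an offloading agent is compact enough to sketch; the states, actions, and reward below are hypothetical stand-ins for the paper's richer encoding:

    # On-policy SARSA: Q(s,a) += alpha * (r + gamma * Q(s',a') - Q(s,a)).
    import random
    from collections import defaultdict

    ALPHA, GAMMA, EPSILON = 0.1, 0.9, 0.1
    ACTIONS = ["local_edge", "neighbor_edge", "cloud"]
    Q = defaultdict(float)                     # Q[(state, action)] -> value

    def choose(state):
        if random.random() < EPSILON:          # epsilon-greedy exploration
            return random.choice(ACTIONS)
        return max(ACTIONS, key=lambda a: Q[(state, a)])

    def sarsa_step(s, a, reward, s_next):
        a_next = choose(s_next)                # next action from the same policy
        Q[(s, a)] += ALPHA * (reward + GAMMA * Q[(s_next, a_next)] - Q[(s, a)])
        return a_next

    action = choose("overloaded")
    action = sarsa_step("overloaded", action, reward=-1.0, s_next="normal")
    print("next action:", action)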
Four typical application types (healthcare, AR, infotainment, and compute-intensive applications) were used for the simulation. The performance results demonstrate that our proposed Fu-SARSA framework outperformed other algorithms in terms of service time and the task failure rate, especially when the system was overloaded.}, } @article {pmid35808184, year = {2022}, author = {Aldhyani, THH and Alkahtani, H}, title = {Artificial Intelligence Algorithm-Based Economic Denial of Sustainability Attack Detection Systems: Cloud Computing Environments.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {13}, pages = {}, pmid = {35808184}, issn = {1424-8220}, support = {NA000106//'This work was supported through the Annual Funding track by the Deanship of Scientific Research, Vice Presidency for Graduate Studies and Scientific Research, King Faisal University, Saudi Arabia [NA000106]/ ; }, mesh = {Algorithms ; *Artificial Intelligence ; *Cloud Computing ; Neural Networks, Computer ; Support Vector Machine ; }, abstract = {Cloud computing is currently the most cost-effective means of providing commercial and consumer IT services online. However, it is prone to new flaws. An economic denial of sustainability (EDoS) attack specifically leverages the pay-per-use paradigm by building up resource demands over time, culminating in unanticipated usage charges to the cloud customer. We present an effective approach to mitigating EDoS attacks in cloud computing. To mitigate such distributed attacks, methods for detecting them on different cloud computing smart grids have been suggested. These include hard-threshold methods as well as machine and deep learning algorithms: support vector machine (SVM), K-nearest neighbors (KNN), random forest (RF) trees, convolutional neural networks (CNN), and long short-term memory (LSTM). These algorithms offer greater accuracy and lower false alarm rates and are essential for improving the cloud computing service provider's security system. The dataset of nine injection attacks for testing machine and deep learning algorithms was obtained from the Cyber Range Lab at the University of New South Wales (UNSW), Canberra. The experiments were conducted in two categories: binary classification, which included normal and attack datasets, and multi-classification, which included nine classes of attack data. The results of the proposed algorithms showed that the RF approach achieved accuracy of 98% with binary classification, whereas the SVM model achieved accuracy of 97.54% with multi-classification. Moreover, statistical analyses, such as the mean square error (MSE), Pearson correlation coefficient (R), and root mean square error (RMSE), were applied in evaluating the prediction errors between the input data and the prediction values from the different machine and deep learning algorithms. The RF tree algorithm achieved a very low prediction error (MSE = 0.01465) and a correlation R[2] (R squared) level of 92.02% with the binary classification dataset, whereas the algorithm attained an R[2] level of 89.35% with the multi-classification dataset. The findings of the proposed system were compared with different existing EDoS attack detection systems. The proposed attack mitigation algorithms, which were developed based on artificial intelligence, outperformed the existing systems.
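The evaluation style described above (a detector scored with accuracy, MSE, and RMSE) is easy to reproduce on synthetic data with scikit-learn; this sketch is illustrative and does not use the UNSW dataset:

    # Random-forest binary detector with the same error metrics on synthetic data.
    from sklearn.datasets import make_classification
    from sklearn.ensemble import RandomForestClassifier
    from sklearn.metrics import accuracy_score, mean_squared_error
    from sklearn.model_selection import train_test_split

    X, y = make_classification(n_samples=2000, n_features=20, random_state=0)
    X_tr, X_te, y_tr, y_te = train_test_split(X, y, test_size=0.3, random_state=0)

    clf = RandomForestClassifier(n_estimators=100, random_state=0).fit(X_tr, y_tr)
    pred = clf.predict(X_te)

    mse = mean_squared_error(y_te, pred)
    print(f"accuracy={accuracy_score(y_te, pred):.3f}  MSE={mse:.4f}  RMSE={mse ** 0.5:.4f}")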
The goal of this research is to enable the detection and effective mitigation of EDoS attacks.}, } @article {pmid35801559, year = {2022}, author = {Mueen, A and Awedh, M and Zafar, B}, title = {Multi-obstacle aware smart navigation system for visually impaired people in fog connected IoT-cloud environment.}, journal = {Health informatics journal}, volume = {28}, number = {3}, pages = {14604582221112609}, doi = {10.1177/14604582221112609}, pmid = {35801559}, issn = {1741-2811}, mesh = {Algorithms ; Humans ; *Persons with Visual Disabilities ; }, abstract = {The design of smart navigation for visually impaired/blind people is a challenging task. Existing research has addressed either the indoor or the outdoor environment and has failed to focus on optimal route selection, latency minimization, and the presence of multiple obstacles. In order to overcome these challenges and to provide precise assistance to visually impaired people, this paper proposes a smart navigation system for visually impaired people based on both the image and sensor outputs of a smart wearable. The proposed approach involves the following processes: (i) the input query of the visually impaired people (users) is improved by the query processor in order to achieve accurate assistance. (ii) The safest route from source to destination is provided by implementing the Environment aware Bald Eagle Search Optimization algorithm, in which multiple routes are identified and classified into three different classes, from which the safest route is suggested to the users. (iii) The concept of fog computing is leveraged and the optimal fog node is selected in order to minimize the latency. The fog node selection is executed using the Nearest Grey Absolute Decision Making Algorithm based on multiple parameters. (iv) The retrieval of relevant information is performed by computing the Euclidean distance between the reference and database information. (v) Multi-obstacle detection is carried out by YOLOv3 Tiny, in which both static and dynamic obstacles are classified into small, medium, and large obstacles. (vi) The navigation decision is provided by implementing the Adaptive Asynchronous Advantage Actor-Critic (A3C) algorithm based on the fusion of both image and sensor outputs. (vii) Management of heterogeneous data is carried out by predicting and pruning faulty data in the sensor output with a minimum-distance-based extended Kalman filter for better accuracy, and by clustering similar information with the Spatial-Temporal Optics Clustering Algorithm to reduce complexity. The proposed model is implemented in NS 3.26, and the results proved that it outperforms other existing works in terms of obstacle detection and task completion time.}, } @article {pmid35800683, year = {2022}, author = {Chen, T}, title = {Deep Learning-Based Optimization Algorithm for Enterprise Personnel Identity Authentication.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {9662817}, pmid = {35800683}, issn = {1687-5273}, mesh = {Algorithms ; *Deep Learning ; Humans ; }, abstract = {Enterprise strategic management is not only an important part of enterprise work but also an important factor in deepening the reform of the management system and promoting the centralized and unified management of enterprises. Enterprise strategic management studies the major problems of enterprise survival and development in a competitive environment from an overall and long-term point of view.
It is among the most important functions of the senior leaders of modern enterprises. Starting from the characteristics of the recognition object, this paper analyzes individual differences in biometric features through intelligent face image recognition technology, which can be used to distinguish different individuals. This paper studies the main problems of personnel identity authentication in the current enterprise strategic management system. Based on identity management and supported by face image recognition, deep learning, and cloud computing technology, the personnel management model of the management system is constructed, which solves the problems of authenticating personnel's real identities and controlling personnel safety behavior. Experiments show that the model can simplify the workflow, improve operational efficiency, and reduce management costs. From the perspective of enterprise system development, building a scientific enterprise strategic management system is of great significance for improving the scientific level of enterprise system management.}, } @article {pmid35799648, year = {2022}, author = {Ehsan, A and Haider, KZ and Faisal, S and Zahid, FM and Wangari, IM}, title = {Self-Adaptation Resource Allocation for Continuous Offloading Tasks in Pervasive Computing.}, journal = {Computational and mathematical methods in medicine}, volume = {2022}, number = {}, pages = {8040487}, pmid = {35799648}, issn = {1748-6718}, mesh = {Algorithms ; *Artificial Intelligence ; Cloud Computing ; Humans ; *Mobile Applications ; Resource Allocation ; }, abstract = {Advancement in technology has led to an increase in data. Consequently, techniques such as deep learning and artificial intelligence, which are used in deciphering data, are becoming increasingly popular. Advancement in technology also raises user expectations of devices, including consumer interfaces such as mobile apps, virtual environments, and popular software systems. As a result, battery power is consumed quickly, as it is used to provide high-definition displays and to power the devices' sensors. Low latency requires more power consumption in certain conditions. Cloud computing alleviates the computational limitations of smart devices through offloading. Metaheuristic algorithms are used to optimize the device's parameters and to find optimal decisions for transferring data or offloading tasks. In cloud servers, we offload the tasks and limit their resources by simulating them in a virtual environment. Then we check the resource parameters and compare them using metaheuristic algorithms. When comparing the default algorithm, FCFS, to ACO and PSO, we find that PSO yields lower battery usage and makespan than FCFS or ACO.
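A toy version of such a comparison can be sketched by computing the makespan of an FCFS-style assignment and letting a simple stochastic local search (a deliberately simplified stand-in for PSO or ACO) try to improve it; all figures are synthetic:

    import random

    random.seed(0)
    TASKS = [random.uniform(1, 10) for _ in range(30)]   # synthetic task lengths
    N_VMS = 4

    def makespan(assignment):
        loads = [0.0] * N_VMS
        for length, vm in zip(TASKS, assignment):
            loads[vm] += length
        return max(loads)

    fcfs = [i % N_VMS for i in range(len(TASKS))]        # round-robin FCFS baseline

    def local_search(start, iters=2000):
        """Mutate one task's placement at a time and keep improvements."""
        best, best_ms = list(start), makespan(start)
        for _ in range(iters):
            cand = list(best)
            cand[random.randrange(len(cand))] = random.randrange(N_VMS)
            if makespan(cand) < best_ms:
                best, best_ms = cand, makespan(cand)
        return best

    improved = local_search(fcfs)
    print(f"FCFS makespan {makespan(fcfs):.2f} -> searched {makespan(improved):.2f}")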
Offloading tasks reduces device energy consumption, so we compare the metaheuristic algorithms' results in terms of battery usage and makespan; PSO increases battery life and makes the system more efficient.}, } @article {pmid35795755, year = {2022}, author = {Li, J and Guo, B and Liu, K and Zhou, J}, title = {Low Power Scheduling Approach for Heterogeneous System Based on Heuristic and Greedy Method.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {9598933}, pmid = {35795755}, issn = {1687-5273}, abstract = {Big data, cloud computing, and artificial intelligence technologies supported by heterogeneous systems are constantly changing our lives and our cognition of the world. At the same time, their energy consumption affects operating costs and system reliability, which attracts the attention of architecture designers and researchers. In order to solve the energy problem in heterogeneous system environments, and inspired by the results of 0-1 programming, a heuristic and greedy energy-saving (HGES) scheduling method is proposed to allocate tasks reasonably for the purpose of energy saving. Firstly, all tasks are assigned to each GPU in the system, and then the tasks are divided into high-value tasks and low-value tasks by the calculated average time value and variance value of all tasks. Using the greedy method, the high-value tasks are assigned first, and then the low-value tasks are allocated. In order to verify the effectiveness and rationality of HGES, different tasks with different inputs and different comparison methods were designed and tested. The experimental results on different platforms show that HGES saves more energy than the existing method and obtains results faster than 0-1 programming.}, } @article {pmid35795749, year = {2022}, author = {Zhang, H and Zuo, F}, title = {Construction of Digital Teaching Resources of British and American Literature Using Few-Shot Learning and Cloud Computing.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {4526128}, pmid = {35795749}, issn = {1687-5273}, mesh = {*Cloud Computing ; Humans ; *Learning ; Reproducibility of Results ; United States ; }, abstract = {British and American literature is a compulsory course for English majors in Chinese colleges and universities. It plays an important role in cultivating students' aesthetic consciousness and moral cultivation, improving students' humanistic quality and cultural taste, and shaping students' complete personalities. With the rapid development of cloud technology and mobile Internet technology, mobile learning based on mobile devices will become an important direction of mobile Internet technology applications. Based on cloud computing, this paper studies the construction of digital teaching resources for British and American literature. Through an experiment on the perceived difficulty of literature courses for English majors, it was found that, among the 40 participants, the most difficult level accounted for 16.3% on average, the second most difficult for 35.2%, and the easier level for 18.5%; the second most difficult level thus has the highest proportion, followed by the easier level and finally the most difficult.
As one of the core technologies of cloud computing, data split storage adopts measures such as the isomorphism and interchangeability of computing nodes, redundant storage, and multicopy fault tolerance to ensure the high security and reliability of user data, so that users do not have to worry about data loss or virus intrusion. As a new generation of technical means, cloud computing can realize the unified management and scheduling of distributed and heterogeneous resources and provide a new development direction for promoting the co-construction and sharing of British and American literature digital teaching platforms in higher vocational colleges and truly realizing national learning and lifelong learning.}, } @article {pmid35792609, year = {2022}, author = {Gause, G and Mokgaola, IO and Rakhudu, MA}, title = {Technology usage for teaching and learning in nursing education: An integrative review.}, journal = {Curationis}, volume = {45}, number = {1}, pages = {e1-e9}, pmid = {35792609}, issn = {2223-6279}, mesh = {*COVID-19 ; *Education, Nursing ; Humans ; Learning ; Technology ; }, abstract = {BACKGROUND: The increasing availability of technology devices or portable digital assistant devices continues to change the teaching-learning landscape, including technology-supported learning. Portable digital assistants and technology usage have become an integral part of teaching and learning nowadays. Cloud computing, which includes YouTube, Google Apps, Dropbox and Twitter, has become the reality of today's teaching and learning and has noticeably improved higher education, including nursing education.

OBJECTIVES:  The aim of this integrative literature review was to explore and describe technology usage for teaching and learning in nursing education.

METHOD:  A five-step integrative review framework by Whittemore and Knafl was used to attain the objective of this study. The authors searched for both empirical and non-empirical articles from EBSCOhost (health information source and health science), ScienceDirect and African Journals Online Library databases to establish what is already known about the keywords. Key terms included in the literature search were coronavirus disease 2019 (COVID-19), digital learning, online learning, nursing, teaching and learning, and technology use.

RESULTS:  Nineteen articles were selected for analysis. The themes that emerged from this review were (1) technology use in nursing education, (2) the manner in which technology is used in nursing education, (3) antecedents for technology use in nursing education, (4) advantages of technology use in nursing education, (5) disadvantages of technology use in nursing education and (6) technology use in nursing education amidst COVID-19.

CONCLUSION:  Technology in nursing education is used in both clinical and classroom teaching to complement learning. However, there is still a gap in its acceptance despite its upward trend. Contribution: The findings of this study contribute to the body of knowledge on the phenomenon of technology use for teaching and learning in nursing education.}, } @article {pmid35782725, year = {2022}, author = {Wang, X and Wang, C and Li, L and Ma, Q and Ma, A and Liu, B}, title = {DESSO-DB: A web database for sequence and shape motif analyses and identification.}, journal = {Computational and structural biotechnology journal}, volume = {20}, number = {}, pages = {3053-3058}, pmid = {35782725}, issn = {2001-0370}, abstract = {Cis-regulatory motif (motif for short) identification and analyses are essential steps in detecting gene regulatory mechanisms. Deep learning (DL) models have shown substantial advances in motif prediction. In parallel, intuitive and integrative web databases are needed to make effective use of DL models and ensure easy access to the identified motifs. Here, we present DESSO-DB, a web database developed to allow efficient access to the identified motifs and diverse motif analyses. DESSO-DB provides motif prediction results and visualizations of 690 ENCODE human Chromatin Immunoprecipitation sequencing (ChIP-seq) datasets (including 161 transcription factors (TFs) in 91 cell lines) and 1,677 human ChIP-seq datasets (including 547 TFs in 359 cell lines) from Cistrome DB using DESSO, which is an in-house developed DL tool for motif prediction. It also provides online motif finding and scanning functions for new ChIP-seq/ATAC-seq datasets and downloadable motif results of the above 690 ENCODE datasets, 126 cancer ChIP-seq datasets, and 55 RNA Crosslinking-Immunoprecipitation and high-throughput sequencing (CLIP-seq) datasets. DESSO-DB is deployed on the Google Cloud Platform, providing stable and efficient resources freely to the public. DESSO-DB is free and available at http://cloud.osubmi.com/DESSO/.}, } @article {pmid35773889, year = {2022}, author = {Kiourtis, A and Karamolegkos, P and Karabetian, A and Voulgaris, K and Poulakis, Y and Mavrogiorgou, A and Kyriazis, D}, title = {An Autoscaling Platform Supporting Graph Data Modelling Big Data Analytics.}, journal = {Studies in health technology and informatics}, volume = {295}, number = {}, pages = {376-379}, doi = {10.3233/SHTI220743}, pmid = {35773889}, issn = {1879-8365}, mesh = {Big Data ; *COVID-19 ; Data Science ; Delivery of Health Care ; *Diastema ; Humans ; }, abstract = {Big Data has proved to be vast and complex, without being efficiently manageable through traditional architectures, whereas data analysis is considered crucial for both technical and non-technical stakeholders. Current analytics platforms are siloed for specific domains, whereas the requirements to enhance their use and lower their technicalities are continuously increasing. This paper describes a domain-agnostic single-access autoscaling Big Data analytics platform, namely Diastema, as a collection of efficient and scalable components, offering user-friendly analytics through graph data modelling, supporting technical and non-technical stakeholders.
Diastema's applicability is evaluated in healthcare through a predictive classifier for a COVID-19 dataset, considering real-world constraints.}, } @article {pmid35759991, year = {2022}, author = {Wu, Z and Xuan, S and Xie, J and Lin, C and Lu, C}, title = {How to ensure the confidentiality of electronic medical records on the cloud: A technical perspective.}, journal = {Computers in biology and medicine}, volume = {147}, number = {}, pages = {105726}, doi = {10.1016/j.compbiomed.2022.105726}, pmid = {35759991}, issn = {1879-0534}, mesh = {Computer Security ; *Confidentiality ; *Electronic Health Records ; Humans ; }, abstract = {From a technical perspective, for electronic medical records (EMR), this paper proposes an effective confidential management solution on the cloud, whose basic idea is to deploy a trusted local server between the untrusted cloud and each trusted client of a medical information management system, responsible for running an EMR cloud hierarchical storage model and an EMR cloud segmentation query model. (1) The EMR cloud hierarchical storage model is responsible for storing light EMR data items (such as patient basic information) on the local server, while encrypting heavy EMR data items (such as patient medical images) and storing them on the cloud, to ensure the confidentiality of electronic medical records on the cloud. (2) The EMR cloud segmentation query model performs EMR-related query operations through the collaborative interaction between the local server and the cloud server, to ensure the accuracy and efficiency of each EMR query statement. Finally, both theoretical analysis and experimental evaluation demonstrate the effectiveness of the proposed solution for the confidentiality management of electronic medical records on the cloud; that is, it can ensure the confidentiality of electronic medical records on the untrusted cloud without compromising the availability of an existing medical information management system.}, } @article {pmid35756852, year = {2022}, author = {Puneet, and Kumar, R and Gupta, M}, title = {Optical coherence tomography image based eye disease detection using deep convolutional neural network.}, journal = {Health information science and systems}, volume = {10}, number = {1}, pages = {13}, pmid = {35756852}, issn = {2047-2501}, abstract = {Over the past few decades, healthcare industries and medical practitioners have faced many obstacles in diagnosing medical problems due to inadequate technology and limited availability of equipment. In the present era, computer science technologies such as IoT, Cloud Computing, Artificial Intelligence and its allied techniques play a crucial role in the identification of medical diseases, especially in the domain of Ophthalmology. Despite this, ophthalmologists have to perform various disease diagnosis tasks manually, which is time-consuming, and the chances of error are also very high, because some abnormalities of eye diseases present the same symptoms. Furthermore, multiple autonomous systems exist to categorize the diseases, but their prediction rates do not achieve state-of-the-art accuracy. In the proposed approach, by implementing the concepts of attention and transfer learning with a deep convolutional neural network, the model accomplished an accuracy of 97.79% and 95.6% on the training and testing data, respectively.
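A minimal transfer-learning setup of the kind described above can be sketched with Keras; the backbone, head sizes, and the assumed four OCT classes are illustrative choices, not the authors' exact architecture (which also incorporates attention):

    # Frozen pretrained backbone plus a small trainable classification head.
    import tensorflow as tf

    base = tf.keras.applications.MobileNetV2(weights="imagenet",
                                             include_top=False,
                                             input_shape=(224, 224, 3))
    base.trainable = False                       # keep pretrained features fixed

    model = tf.keras.Sequential([
        base,
        tf.keras.layers.GlobalAveragePooling2D(),
        tf.keras.layers.Dense(128, activation="relu"),
        tf.keras.layers.Dense(4, activation="softmax"),  # CNV, DME, drusen, normal (assumed)
    ])
    model.compile(optimizer="adam", loss="sparse_categorical_crossentropy",
                  metrics=["accuracy"])
    # model.fit(train_ds, validation_data=val_ds, epochs=10)  # datasets assumed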
This autonomous model efficiently classifies various ocular disorders, namely choroidal neovascularization, diabetic macular edema, and drusen, from optical coherence tomography images. It may provide a realistic solution for the healthcare sector to reduce ophthalmologists' burden in screening for diabetic retinopathy.}, } @article {pmid35756406, year = {2022}, author = {Zhang, H and Li, M}, title = {Integrated Design and Development of Intelligent Scenic Area Rural Tourism Information Service Based on Hybrid Cloud.}, journal = {Computational and mathematical methods in medicine}, volume = {2022}, number = {}, pages = {5316304}, pmid = {35756406}, issn = {1748-6718}, mesh = {Humans ; Information Services ; *Tourism ; *Travel ; }, abstract = {Although the "Internet+" technologies (big data and cloud computing) have been implemented in many industries, each industry involved in rural tourism economic information services has its own database, and there are still vast economic information resources that have not been exploited. Through third-party information services for rural tourism enterprises and a mobile context-aware travel recommendation service, travel agency Z has achieved good economic and social benefits by deeply mining the value of, and innovatively applying, the enterprise's existing data. This clearly demonstrates that, in order to maximise the benefits of economic data, rural tourism businesses should focus not only on the application of new technologies and methodologies but also keep demand and data at the core, thoroughly investigating the potential value of current data. This paper mainly analyzes the problems related to how rural tourism can be upgraded under a smart tourism platform, with the aim of improving the development of China's rural tourism industry with the help of an integrated smart tourism platform, and proposes a hybrid cloud-based integrated system of smart scenic area rural tourism information services, which can meet the actual usage needs of rural tourism, with good shared service effect and platform application performance, promoting the development of rural tourism and improving resource utilization.}, } @article {pmid35755764, year = {2022}, author = {Hu, Q}, title = {Optimization of Online Course Platform for Piano Preschool Education Based on Internet Cloud Computing System.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {6525866}, pmid = {35755764}, issn = {1687-5273}, mesh = {*Cloud Computing ; Humans ; Internet ; *Learning ; Students ; }, abstract = {This article introduces online piano teaching methods and describes the development and implementation of an online course platform for preschool piano education. The system consists of four parts: backend, WeChat, client, and web page. Backend development uses PHP and the Laravel framework; WeChat and web development both use JavaScript and the React framework; client development uses Objective-C; and the system provides a RESTful API for internal use, mainly by the client, WeChat, and web front ends. The client relies on the research group's existing voice sensors to recognize and evaluate the students' performances. The role of the client is to show the students their homework and demonstrate the activities performed by the teacher.
The WeChat terminal manages student work, user information, and user social interaction, while the web page provides score management and data analysis. Based on the knowledge of network course design, this article studies the design of a piano preschool education platform and adds the relevant components of the Internet cloud computing system and voice sensor to this platform, which provides great convenience for students learning the piano.}, } @article {pmid35755732, year = {2022}, author = {Liu, B and Zhang, T and Hu, W}, title = {Intelligent Traffic Flow Prediction and Analysis Based on Internet of Things and Big Data.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {6420799}, pmid = {35755732}, issn = {1687-5273}, mesh = {Automobiles ; *Big Data ; Cities ; Humans ; *Internet of Things ; Transportation ; }, abstract = {Nowadays, the problem of road traffic safety cannot be ignored. Almost all major cities have problems such as a poor traffic environment and low road efficiency. Large-scale and long-term traffic congestion occurs almost every day. Transportation has developed rapidly, and more and more advanced means of transportation have emerged, but the automobile remains one of the main means of transportation for people's travel. Serious traffic jams occur in almost all cities worldwide, and the excessive daily traffic flow paralyzes urban transportation systems, which brings great inconvenience to people's travel. Various countries have actively taken corresponding measures, e.g., traffic diversion, license-plate restrictions, or expanding the road network, but these measures have had little effect. Traditional intelligent traffic flow forecasting has problems such as low accuracy and delay. To address this problem, this paper applies a model combining the Internet of Things and big data to intelligent traffic flow forecasting, analyzes its social benefits, and describes its three-tier network architecture, namely, the perception layer, the network layer, and the application layer. The mode of combining cloud computing with edge computing is also researched and analyzed. Using a multi-perspective linear discriminant analysis algorithm that combines the similarities and differences between data into multiple atomic services, intelligent traffic flow prediction based on the combination of the Internet of Things and big data is performed. Through the monitoring and extraction of relevant traffic flow data, together with data analysis, processing, storage, and visual display, the accuracy and effectiveness of overall traffic flow prediction are improved. Traffic flow prediction with the combined Internet of Things and big data system is demonstrated through a case experiment.
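For the linear discriminant analysis step mentioned above, a minimal sketch on synthetic traffic features might look as follows; the feature choices and class labels are assumptions for illustration:

    # Classify traffic-flow states with LDA on two synthetic features.
    import numpy as np
    from sklearn.discriminant_analysis import LinearDiscriminantAnalysis

    rng = np.random.default_rng(0)
    free_flow = rng.normal([60, 20], 5, size=(200, 2))   # [speed km/h, vehicles/min]
    congested = rng.normal([15, 45], 5, size=(200, 2))

    X = np.vstack([free_flow, congested])
    y = np.array([0] * 200 + [1] * 200)                  # 0 = free flow, 1 = congested

    lda = LinearDiscriminantAnalysis().fit(X, y)
    print(lda.predict([[22, 40]]))                       # -> [1], i.e. congested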
The method proposed in this paper can be applied in intelligent transportation services and can predict the stability of transportation and traffic flow in real time, so as to alleviate traffic congestion, reduce manual intervention, and achieve the goal of intelligent traffic management.}, } @article {pmid35755635, year = {2022}, author = {Sladky, V and Nejedly, P and Mivalt, F and Brinkmann, BH and Kim, I and St Louis, EK and Gregg, NM and Lundstrom, BN and Crowe, CM and Attia, TP and Crepeau, D and Balzekas, I and Marks, VS and Wheeler, LP and Cimbalnik, J and Cook, M and Janca, R and Sturges, BK and Leyde, K and Miller, KJ and Van Gompel, JJ and Denison, T and Worrell, GA and Kremen, V}, title = {Distributed brain co-processor for tracking spikes, seizures and behaviour during electrical brain stimulation.}, journal = {Brain communications}, volume = {4}, number = {3}, pages = {fcac115}, pmid = {35755635}, issn = {2632-1297}, support = {R01 NS092882/NS/NINDS NIH HHS/United States ; U24 NS113637/NS/NINDS NIH HHS/United States ; }, abstract = {Early implantable epilepsy therapy devices provided open-loop electrical stimulation without brain sensing, computing, or an interface for synchronized behavioural inputs from patients. Recent epilepsy stimulation devices provide brain sensing but have not yet developed analytics for accurately tracking and quantifying behaviour and seizures. Here we describe a distributed brain co-processor providing an intuitive bi-directional interface between patient, implanted neural stimulation and sensing device, and local and distributed computing resources. Automated analysis of continuous streaming electrophysiology is synchronized with patient reports using a handheld device and integrated with distributed cloud computing resources for quantifying seizures, interictal epileptiform spikes and patient symptoms during therapeutic electrical brain stimulation. The classification algorithms for interictal epileptiform spikes and seizures were developed and parameterized using long-term ambulatory data from nine humans and eight canines with epilepsy, and then implemented prospectively in out-of-sample testing in two pet canines and four humans with drug-resistant epilepsy living in their natural environments. Accurate seizure diaries are needed as the primary clinical outcome measure of epilepsy therapy and to guide brain-stimulation optimization. The brain co-processor system described here enables tracking interictal epileptiform spikes, seizures and correlation with patient behavioural reports. In the future, correlation of spikes and seizures with behaviour will allow more detailed investigation of the clinical impact of spikes and seizures on patients.}, } @article {pmid35751030, year = {2022}, author = {Shaukat, Z and Farooq, QUA and Tu, S and Xiao, C and Ali, S}, title = {A state-of-the-art technique to perform cloud-based semantic segmentation using deep learning 3D U-Net architecture.}, journal = {BMC bioinformatics}, volume = {23}, number = {1}, pages = {251}, pmid = {35751030}, issn = {1471-2105}, mesh = {*Brain Neoplasms/diagnostic imaging/pathology ; Cloud Computing ; *Deep Learning ; *Glioma ; Humans ; Image Processing, Computer-Assisted/methods ; Magnetic Resonance Imaging/methods ; Semantics ; }, abstract = {Glioma is the most aggressive and dangerous primary brain tumor, with a survival time of less than 14 months.
Tumor segmentation is a necessary image processing task for gliomas and is important for their timely diagnosis and the start of treatment. Semantic segmentation of brain tumor datasets with the 3D U-Net architecture is a core deep learning task. In this paper, we present a unique cloud-based 3D U-Net method to perform brain tumor segmentation using the BraTS dataset. The system was effectively trained using the Adam optimization solver with multiple tuned hyperparameters. We obtained an average Dice score of 95%, which makes our method the first cloud-based method to achieve such accuracy. The Dice score is calculated using the Sørensen-Dice similarity coefficient. We also performed an extensive literature review of the brain tumor segmentation methods implemented in the last five years to get a state-of-the-art picture of well-known methodologies with higher Dice scores. In comparison to the already implemented architectures, our method ranks on top in terms of accuracy when using a cloud-based 3D U-Net framework for glioma segmentation.}, } @article {pmid35747132, year = {2022}, author = {Li, W and Guo, Y}, title = {A Secure Private Cloud Storage Platform for English Education Resources Based on IoT Technology.}, journal = {Computational and mathematical methods in medicine}, volume = {2022}, number = {}, pages = {8453470}, pmid = {35747132}, issn = {1748-6718}, mesh = {*Big Data ; *Cloud Computing ; Humans ; Technology ; }, abstract = {The contemporary ubiquitous "cloud" network knowledge and information resources, as well as ecological pedagogy theory, have enlarged the perspective of teaching research, widened its innovation area, and created practical options for English classroom reform. Cloud education relies on the Internet of Things, cloud computing, and big data to exert a huge impact on the English learning process. The key to the integration of English education resources is the storage of a huge amount of English teaching data. Applying the technology and methods of cloud storage to the construction of integrated English education resources can effectively save schools' educational resources, improve the utilization rate of English education resources, and thus enhance the teaching level of English subjects. In this work, we examine the existing state of English education resource building and teaching administration and offer a way of creating a "private cloud" of English education materials. We not only examined the architecture and three-layer modules of cloud computing in depth, but also analyzed the "private cloud" technology and built the cloud structure of English teaching materials on this foundation.
We hope that this paper can help and inspire efforts to solve the problems of uneven distribution, irregular management, and difficult sharing in the construction of English education resources.}, } @article {pmid35746414, year = {2022}, author = {Ud Din, MM and Alshammari, N and Alanazi, SA and Ahmad, F and Naseem, S and Khan, MS and Haider, HSI}, title = {InteliRank: A Four-Pronged Agent for the Intelligent Ranking of Cloud Services Based on End-Users' Feedback.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {12}, pages = {}, pmid = {35746414}, issn = {1424-8220}, mesh = {*Cloud Computing ; Data Collection ; Feedback ; Reproducibility of Results ; *Software ; }, abstract = {Cloud Computing (CC) provides a combination of technologies that allows the user to use the most resources in the least amount of time and with the least amount of money. CC semantics play a critical role in ranking heterogeneous data by using the properties of different cloud services and then achieving the optimal cloud service. Despite the efforts made to enable simple access to this CC innovation, with various organizations delivering comparable services at varying cost and execution levels, it is far more difficult to identify the ideal cloud service based on the user's requirements. In this research, we propose a Cloud-Services-Ranking Agent (CSRA) for analyzing cloud services using end-users' feedback, including Platform as a Service (PaaS), Infrastructure as a Service (IaaS), and Software as a Service (SaaS), based on ontology mapping and selecting the optimal service. The proposed CSRA employs Machine-Learning (ML) techniques to rank cloud services using parameters such as availability, security, reliability, and cost. Here, the Quality of Web Service (QWS) dataset is used, which has seven major cloud service categories, ranked from 0-6, to extract the required persuasive features through Sequential Minimal Optimization Regression (SMOreg). The classification outcomes obtained through SMOreg demonstrate an overall accuracy of around 98.71% in identifying optimum cloud services through the identified parameters. The main advantage of SMOreg is that the amount of memory required for SMO is linear.
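SMOreg is Weka's SMO-based support-vector regression; as a rough analogue, scikit-learn's SVR can illustrate QoS-driven ranking on synthetic data (the feature weights below are invented for the sketch):

    # Learn a 0-6 style service rank from QoS features, then score candidates.
    import numpy as np
    from sklearn.svm import SVR

    rng = np.random.default_rng(1)
    # Features: availability, security, reliability, cost (normalised, synthetic).
    X = rng.random((300, 4))
    rank = 6 * np.clip(0.3 * X[:, 0] + 0.3 * X[:, 1] + 0.3 * X[:, 2] - 0.1 * X[:, 3], 0, 1)

    model = SVR(kernel="rbf").fit(X, rank)
    candidates = rng.random((5, 4))
    scores = model.predict(candidates)
    print("best candidate service:", int(np.argmax(scores)))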
The findings show that our improved model outperforms prevailing techniques such as Multilayer Perceptron (MLP) and Linear Regression (LR) in terms of precision.}, } @article {pmid35746245, year = {2022}, author = {Liu, X and Jin, J and Dong, F}, title = {Edge-Computing-Based Intelligent IoT: Architectures, Algorithms and Applications.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {12}, pages = {}, pmid = {35746245}, issn = {1424-8220}, abstract = {With the rapid growth of the Internet of Things (IoT), 5G networks and beyond, the computing paradigm for intelligent IoT systems is shifting from conventional centralized-cloud computing to distributed edge computing [...].}, } @article {pmid35746169, year = {2022}, author = {Dezfouli, B and Liu, Y}, title = {Editorial: Special Issue "Edge and Fog Computing for Internet of Things Systems".}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {12}, pages = {}, pmid = {35746169}, issn = {1424-8220}, abstract = {Employing edge and fog computing for building IoT systems is essential, especially because of the massive amount of data generated by sensing devices, the delay requirements of IoT applications, the high burden of data processing on cloud platforms, and the need to take immediate actions against security threats.}, } @article {pmid35746127, year = {2022}, author = {Lakhan, A and Morten Groenli, T and Majumdar, A and Khuwuthyakorn, P and Hussain Khoso, F and Thinnukool, O}, title = {Potent Blockchain-Enabled Socket RPC Internet of Healthcare Things (IoHT) Framework for Medical Enterprises.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {12}, pages = {}, pmid = {35746127}, issn = {1424-8220}, mesh = {*Blockchain ; Cloud Computing ; Computer Security ; Delivery of Health Care ; Humans ; Internet ; *Internet of Things ; }, abstract = {Present-day intelligent healthcare applications offer digital healthcare services to users in a distributed manner. The Internet of Healthcare Things (IoHT) is the mechanism of the Internet of Things (IoT) found in different healthcare applications, with devices that are attached to external fog cloud networks. Using different mobile applications connecting to cloud computing, the applications of the IoHT include remote healthcare monitoring systems, high blood pressure monitoring, online medical counseling, and others. These applications are designed on a client-server architecture following various standards such as the common object request broker architecture (CORBA), service-oriented architecture (SOA), remote method invocation (RMI), and others. However, these applications do not directly support the many healthcare nodes and blockchain technology in the current standard. Thus, this study devises a potent blockchain-enabled socket RPC IoHT framework for medical enterprises (e.g., healthcare applications). The goal is to minimize service costs, blockchain security costs, and data storage costs in distributed mobile cloud networks.
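A minimal socket-based RPC exchange of the kind such a framework builds on can be sketched with Python's standard library; this is illustrative only (the proposed framework layers blockchain authorization on top, and the method name and port here are hypothetical):

    # One-process demo: an RPC server thread and a client calling it over a socket.
    import threading
    import xmlrpc.client
    from xmlrpc.server import SimpleXMLRPCServer

    def record_vitals(patient_id, heart_rate):
        return f"stored vitals for {patient_id}: HR={heart_rate}"

    server = SimpleXMLRPCServer(("localhost", 8009), logRequests=False)
    server.register_function(record_vitals)
    threading.Thread(target=server.serve_forever, daemon=True).start()

    proxy = xmlrpc.client.ServerProxy("http://localhost:8009")
    print(proxy.record_vitals("patient-42", 71))   # remote call over the socket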
Simulation results show that the proposed blockchain-enabled socket RPC minimized the service cost by 40%, the blockchain cost by 49%, and the storage cost by 23% for healthcare applications.}, } @article {pmid35745356, year = {2022}, author = {Liu, H and Zhang, R and Liu, Y and He, C}, title = {Unveiling Evolutionary Path of Nanogenerator Technology: A Novel Method Based on Sentence-BERT.}, journal = {Nanomaterials (Basel, Switzerland)}, volume = {12}, number = {12}, pages = {}, pmid = {35745356}, issn = {2079-4991}, support = {72104224, 71974107, L2124002, 91646102//National Natural Science Foundation of China/ ; CKCEST-2022-1-30//Construction Project of China Knowledge Center for Engineering Sciences and Technology/ ; }, abstract = {In recent years, nanogenerator technology has developed rapidly with the rise of cloud computing, artificial intelligence, and other fields. Therefore, the quick identification of the evolutionary path of nanogenerator technology from a large amount of data attracts much attention. It is of great significance in grasping technical trends and analyzing technical areas of interest. However, there are some limitations in previous studies. On the one hand, previous research on technological evolution has generally utilized bibliometrics, patent analysis, and citations between patents and papers, ignoring the rich semantic information contained therein; on the other hand, its evolution analysis perspective is single, and it is difficult to obtain accurate results. Therefore, this paper proposes a new framework based on the methods of Sentence-BERT and phrase mining, using multi-source data, such as papers and patents, to unveil the evolutionary path of nanogenerator technology. Firstly, using text vectorization, clustering algorithms, and the phrase mining method, current technical themes of significant interest to researchers can be obtained. Next, this paper correlates the multi-source fusion themes through semantic similarity calculation and demonstrates the multi-dimensional technology evolutionary path by using the "theme river map". Finally, this paper presents an evolution analysis from the perspective of frontier research and technology research, so as to discover the development focus of nanogenerators and predict the future application prospects of nanogenerator technology.}, } @article {pmid35742161, year = {2022}, author = {Ashraf, E and Areed, NFF and Salem, H and Abdelhay, EH and Farouk, A}, title = {FIDChain: Federated Intrusion Detection System for Blockchain-Enabled IoT Healthcare Applications.}, journal = {Healthcare (Basel, Switzerland)}, volume = {10}, number = {6}, pages = {}, pmid = {35742161}, issn = {2227-9032}, abstract = {Recently, there has been considerable growth in the internet of things (IoT)-based healthcare applications; however, they suffer from a lack of intrusion detection systems (IDS). Leveraging recent technologies, such as machine learning (ML), edge computing, and blockchain, can provide suitable and strong security solutions for preserving the privacy of medical data. 
In this paper, FIDChain IDS is proposed, using lightweight artificial neural networks (ANN) trained in a federated learning (FL) manner to preserve the privacy of healthcare data, with the advances of blockchain technology providing a distributed ledger for aggregating the local weights and then broadcasting the updated global weights after averaging, which prevents poisoning attacks and provides full transparency and immutability over the distributed system with negligible overhead. Applying the detection model at the edge protects the cloud if an attack happens, as malicious data is blocked at its gateway, with a shorter detection time and lower computing and processing requirements, since FL deals with smaller sets of data. The ANN and eXtreme Gradient Boosting (XGBoost) models were evaluated using the BoT-IoT dataset. The results show that ANN models have higher accuracy and better performance with the heterogeneity of data in IoT devices, such as intensive care units (ICU) in healthcare systems. Testing FIDChain with different datasets (CSE-CIC-IDS2018, Bot Net IoT, and KDD Cup 99) reveals that the BoT-IoT dataset gives the most stable and accurate results for testing IoT applications, such as those used in healthcare systems.}, } @article {pmid35734349, year = {2022}, author = {Aldahwan, NS and Ramzan, MS}, title = {The Descriptive Data Analysis for the Adoption of Community Cloud in Saudi HEI-Based Factor Adoption.}, journal = {BioMed research international}, volume = {2022}, number = {}, pages = {7765204}, pmid = {35734349}, issn = {2314-6141}, mesh = {*Cloud Computing ; *Data Analysis ; Humans ; Reproducibility of Results ; Saudi Arabia ; Surveys and Questionnaires ; }, abstract = {Due to its increased reliability, adaptability, scalability, availability, and processing capacity, cloud computing is rapidly becoming a popular trend around the world. One of the major issues with cloud computing is making an informed decision about the adoption of community cloud computing (ACCC). To date, there are various technology acceptance theories and models to validate perspectives on ACCC at both the organizational and individual levels. However, no experimental studies have been carried out to provide a comprehensive assessment of the factors of ACCC, specifically in the area of Saudi Higher Education Institutions (HEIs). Thus, this research was aimed at exploring the factors of ACCC and their relationship to the experiences of employees. The analysis of the employee context was driven by the success factors of the technological, organizational, environmental, human, security, and advantage contexts on community cloud computing adoption in HEIs. Data were collected through a questionnaire-based survey with 106 responses. We present findings based on descriptive analysis, identifying the significant components that contributed to the effective implementation of ACCC.
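The federated aggregation step described in the FIDChain entry above, local training followed by a weighted average of the weights, can be sketched in a few lines; the node counts and weight vectors are synthetic:

    # FedAvg-style aggregation: only weights travel, never the raw patient data.
    import numpy as np

    def fedavg(local_weights, sample_counts):
        """Weighted average of per-node weight vectors, as in federated averaging."""
        total = sum(sample_counts)
        return sum(w * (n / total) for w, n in zip(local_weights, sample_counts))

    node_weights = [np.array([0.2, 1.1]), np.array([0.4, 0.9]), np.array([0.3, 1.0])]
    global_weights = fedavg(node_weights, sample_counts=[120, 80, 200])
    print(global_weights)   # broadcast back to every edge node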
Security concerns are a significant influencing element in the adoption of community cloud technology.}, } @article {pmid35730340, year = {2022}, author = {Cotur, Y and Olenik, S and Asfour, T and Bruyns-Haylett, M and Kasimatis, M and Tanriverdi, U and Gonzalez-Macia, L and Lee, HS and Kozlov, AS and Güder, F}, title = {Bioinspired Stretchable Transducer for Wearable Continuous Monitoring of Respiratory Patterns in Humans and Animals.}, journal = {Advanced materials (Deerfield Beach, Fla.)}, volume = {34}, number = {33}, pages = {e2203310}, doi = {10.1002/adma.202203310}, pmid = {35730340}, issn = {1521-4095}, support = {//Imperial College Department of Bioengineering/ ; //Institute for Security Science and Technology/ ; //Turkish Ministry of Education/ ; //EPSRC IAA/ ; OPP1212574//Bill and Melinda Gates Foundation/ ; W911QY-20-R-0022//US Army/ ; W911NF1820120//US Army/ ; 1846144//EPSRC DTP/ ; //Imperial College Centre for Processable Electronics/ ; 10004425//Innovate UK/ ; //Centre for Blast Injury Studies/ ; 214234/Z/18/Z/WT_/Wellcome Trust/United Kingdom ; }, mesh = {Animals ; Artificial Intelligence ; Dogs ; Humans ; Monitoring, Physiologic ; Silicones ; Transducers ; *Wearable Electronic Devices ; }, abstract = {A bio-inspired continuous wearable respiration sensor is reported, modeled after the lateral line system of fish, which fish use to detect mechanical disturbances in the water. Despite the clinical importance of monitoring respiratory activity in humans and animals, continuous measurements of breathing patterns and rates are rarely performed in or outside of clinics. This is largely because conventional sensors are too inconvenient or expensive for wearable sensing for most individuals and animals. The bio-inspired air-silicone composite transducer (ASiT) is placed on the chest and measures respiratory activity by continuously measuring the force applied to an air channel embedded inside a silicone-based elastomeric material. The force applied on the surface of the transducer during breathing changes the air pressure inside the channel, which is measured using a commercial pressure sensor and mixed-signal wireless electronics. The transducers produced in this work were extensively characterized and tested with humans, dogs, and laboratory rats. The bio-inspired ASiT may enable the early detection of a range of disorders that result in altered patterns of respiration. The technology reported can also be combined with artificial intelligence and cloud computing to algorithmically detect illness in humans and animals remotely, reducing unnecessary visits to clinics.}, } @article {pmid35730064, year = {2023}, author = {Pillen, D and Eckard, M}, title = {The impact of the shift to cloud computing on digital recordkeeping practices at the University of Michigan Bentley historical library.}, journal = {Archival science}, volume = {23}, number = {1}, pages = {65-80}, pmid = {35730064}, issn = {1573-7500}, abstract = {Cloud-based productivity, collaboration, and storage tools offer increased opportunities for collaboration and potential cost-savings over locally hosted solutions and have seen widespread adoption throughout industry, government, and academia over the last decade. While these tools benefit organizations, IT departments, and day-to-day users, they present unique challenges for records managers and archivists.
As a review of the relevant literature demonstrates, issues surrounding cloud computing are not limited to the technology (although the implementation and technological issues are numerous) but also include organization management, human behavior, regulation, and records management, making the process of archiving digital information in this day and age all the more difficult. This paper explores some of the consequences of this shift and its effect on digital recordkeeping at the Bentley Historical Library, whose mission is to "collect the materials for the University of Michigan." After providing context for this problem by discussing relevant literature, two practicing archivists will explore the impact of the move toward cloud computing as well as various productivity software and collaboration tools in use at U-M throughout the various stages of a standard lifecycle model for managing records.}, } @article {pmid35730008, year = {2022}, author = {Mahanty, C and Kumar, R and Patro, SGK}, title = {Internet of Medical Things-Based COVID-19 Detection in CT Images Fused with Fuzzy Ensemble and Transfer Learning Models.}, journal = {New generation computing}, volume = {40}, number = {4}, pages = {1125-1141}, pmid = {35730008}, issn = {0288-3635}, abstract = {Accurate COVID-19 detection is one of the most difficult research areas in today's healthcare industry in its effort to combat the coronavirus pandemic. Because of its low infection miss rate and high sensitivity, chest computed tomography (CT) imaging has been recommended as a viable technique for COVID-19 diagnosis in a number of recent clinical investigations. This article presents an Internet of Medical Things (IoMT)-based platform for improving and speeding up COVID-19 identification. Clinical devices are connected to network resources in the suggested IoMT platform using cloud computing. The method enables patients and healthcare experts to work together in real time to diagnose and treat COVID-19, potentially saving time and effort for both patients and physicians. In this paper, we introduce a technique for classifying chest CT scan images into COVID, pneumonia, and normal classes that uses a Sugeno fuzzy integral ensemble across three transfer learning models, namely SqueezeNet, DenseNet-201, and MobileNetV2. The suggested fuzzy ensemble techniques outperform each individual transfer learning methodology as well as trainable ensemble strategies in terms of accuracy. The suggested MobileNetV2 fused with Sugeno fuzzy integral ensemble model has a 99.15% accuracy rate. In the present research, this framework was utilized to identify COVID-19, but it may also be implemented and used for medical imaging analyses of other disorders.}, } @article {pmid35730007, year = {2022}, author = {Gupta, A and Singh, A}, title = {An Intelligent Healthcare Cyber Physical Framework for Encephalitis Diagnosis Based on Information Fusion and Soft-Computing Techniques.}, journal = {New generation computing}, volume = {40}, number = {4}, pages = {1093-1123}, pmid = {35730007}, issn = {0288-3635}, abstract = {Viral encephalitis is a contagious disease that causes life insecurity and is considered one of the major health concerns worldwide. It causes inflammation of the brain and, if left untreated, can have persistent effects on the central nervous system. Conspicuously, this paper proposes an intelligent cyber-physical healthcare framework based on the IoT-fog-cloud collaborative network, employing soft-computing technology and information fusion.
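Editor's note: the Mahanty et al. entry above fuses the class probabilities of three CNNs with a Sugeno fuzzy integral. Below is a small illustrative sketch of that fusion rule for one CT image; the fuzzy densities (per-model worth) are arbitrary choices for demonstration, not values from the paper.

import numpy as np

def lambda_measure(densities):
    """Solve prod(1 + lam*g_i) = 1 + lam for the Sugeno lambda (lam > -1, lam != 0)."""
    poly = np.array([1.0])
    for g in densities:
        poly = np.polymul(poly, np.array([g, 1.0]))  # multiply by (g*lam + 1)
    poly[-1] -= 1.0  # subtract the constant 1
    poly[-2] -= 1.0  # subtract the lam term
    roots = np.roots(poly)
    real = roots[np.isreal(roots)].real
    candidates = [r for r in real if r > -1 and abs(r) > 1e-9]
    return min(candidates, key=abs) if candidates else 0.0

def sugeno_integral(scores, densities):
    """Sugeno fuzzy integral of per-model scores under a lambda fuzzy measure."""
    lam = lambda_measure(densities)
    order = np.argsort(scores)[::-1]  # visit models from highest to lowest score
    g_prev, value = 0.0, 0.0
    for idx in order:
        g_prev = densities[idx] + g_prev + lam * densities[idx] * g_prev
        value = max(value, min(scores[idx], g_prev))
    return value

# Per-model class probabilities for one image (rows: 3 models; cols: COVID, pneumonia, normal)
probs = np.array([[0.7, 0.2, 0.1],
                  [0.6, 0.3, 0.1],
                  [0.4, 0.4, 0.2]])
densities = [0.4, 0.35, 0.3]  # illustrative per-model worth, not from the paper
fused = [sugeno_integral(probs[:, c], densities) for c in range(probs.shape[1])]
print(np.argmax(fused), fused)  # fused class decision and per-class integrals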
The proposed framework uses IoT-based sensors, electronic medical records, and user devices for data acquisition. The fog layer, composed of numerous nodes, processes the most specific encephalitis symptom-related data to classify possible encephalitis cases in real time and issues an alarm when a significant health emergency occurs. Furthermore, the cloud layer involves a multi-step data processing scheme for in-depth data analysis. First, data obtained across multiple data generation sources are fused to obtain a more consistent, accurate, and reliable feature set. Data preprocessing and feature selection techniques are applied to the fused data for dimensionality reduction over the cloud computing platform. An adaptive neuro-fuzzy inference system is applied in the cloud to determine the risk of a disease and classify the results into one of four categories: no risk, probable risk, low risk, and acute risk. Moreover, the alerts are generated and sent to the stakeholders based on the risk factor. Finally, the computed results are stored in the cloud database for future use. For validation purposes, various experiments are performed using real-time datasets. The analysis performed on the fog and cloud layers shows higher performance than existing models. Future research will focus on resource allocation in the cloud layer while considering various security aspects to improve the utility of the proposed work.}, } @article {pmid35729139, year = {2022}, author = {Yue, YF and Chen, GP and Wang, L and Yang, J and Yang, KT}, title = {[Dynamic monitoring and evaluation of ecological environment quality in Zhouqu County, Gansu, China based on Google Earth Engine cloud platform].}, journal = {Ying yong sheng tai xue bao = The journal of applied ecology}, volume = {33}, number = {6}, pages = {1608-1614}, doi = {10.13287/j.1001-9332.202206.036}, pmid = {35729139}, issn = {1001-9332}, mesh = {China ; Cloud Computing ; *Ecosystem ; Environmental Monitoring/methods ; *Remote Sensing Technology ; Rivers ; Search Engine ; }, abstract = {Zhouqu County is located in the transition region from the Qinghai-Tibet Plateau to the Qinba Mountains, and is an important part of the ecological barrier in the upper stream of the Yangtze River. In this study, we used the Google Earth Engine cloud processing platform to perform inter-image optimal reconstruction of Landsat surface reflectance images from 1998-2019. We calculated four indicators of regional wetness, greenness, dryness, and heat. The component indicators were coupled by principal component analysis to construct a remote sensing ecological index (RSEI) and to analyze the spatial and temporal variations of ecological environment quality in Zhouqu County. The results showed that the contribution of the four component indicators to the eigenvalues of the coupled RSEI was above 70%, with an even distribution of the loadings, indicating that the RSEI integrated most of the features of the component indicators. From 1998 to 2019, the RSEI of Zhouqu County ranged from 0.55 to 0.63, showing an increasing trend with a growth rate of 0.04·(10 a)[-1], and the area of the better grade increased by 425.56 km[2]. The area with altitude ≤2200 m was dominated by medium and lower ecological environment quality grades, while the area of the better ecological environment quality grade increased by 16.5%.
The ecological and environmental quality of the region from 2200 to 3300 m was dominated by good grades, increasing to 71.3% in 2019, with the area of medium and below ecological and environmental quality grades decreasing year by year. The area with altitude ≥3300 m was dominated by the medium ecological quality grade. The medium and below ecological quality grades showed a U-shaped trend during the study period. Overall, the ecological environment quality of Zhouqu County was improving, though with fluctuations. It is necessary to continuously strengthen the protection and management of the ecological environment in order to guarantee the continuous improvement of ecological environment quality.}, } @article {pmid35729113, year = {2022}, author = {Erdem, C and Mutsuddy, A and Bensman, EM and Dodd, WB and Saint-Antoine, MM and Bouhaddou, M and Blake, RC and Gross, SM and Heiser, LM and Feltus, FA and Birtwistle, MR}, title = {A scalable, open-source implementation of a large-scale mechanistic model for single cell proliferation and death signaling.}, journal = {Nature communications}, volume = {13}, number = {1}, pages = {3555}, pmid = {35729113}, issn = {2041-1723}, support = {R35 GM141891/GM/NIGMS NIH HHS/United States ; U54 CA209988/CA/NCI NIH HHS/United States ; U54 HG008098/HG/NHGRI NIH HHS/United States ; R01 GM104184/GM/NIGMS NIH HHS/United States ; }, mesh = {Cell Proliferation ; *Cloud Computing ; Computer Simulation ; Signal Transduction ; *Software ; }, abstract = {Mechanistic models of how single cells respond to different perturbations can help integrate disparate big data sets or predict response to varied drug combinations. However, the construction and simulation of such models have proved challenging. Here, we developed a python-based model creation and simulation pipeline that converts a few structured text files into an SBML standard and is high-performance- and cloud-computing ready. We applied this pipeline to our large-scale, mechanistic pan-cancer signaling model (named SPARCED) and demonstrate it by adding an IFNγ pathway submodel. We then investigated whether a putative crosstalk mechanism could be consistent with experimental observations from the LINCS MCF10A Data Cube that IFNγ acts as an anti-proliferative factor. The analyses suggested this observation can be explained by IFNγ-induced SOCS1 sequestering activated EGF receptors. This work forms a foundational recipe for increased mechanistic model-based data integration on a single-cell level, an important building block for clinically-predictive mechanistic models.}, } @article {pmid35725904, year = {2022}, author = {Pradhan, C and Padhee, SK and Bharti, R and Dutta, S}, title = {A process-based recovery indicator for anthropogenically disturbed river system.}, journal = {Scientific reports}, volume = {12}, number = {1}, pages = {10390}, pmid = {35725904}, issn = {2045-2322}, mesh = {Cross-Sectional Studies ; Environmental Monitoring ; *Floods ; India ; *Rivers ; Seasons ; }, abstract = {The present paper utilizes entropy theory and Google Earth Engine cloud computing techniques to investigate system state and river recovery potential in two large sub-basins of the Mahanadi River, India. The cross-sectional intensity entropy (CIE) is computed for the post-monsoon season (October-March) along the selected reaches. Further, a normalized river recovery indicator (NRRI) is formulated to assess the temporal changes in river health.
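Editor's note: a schematic sketch of the RSEI construction used in the Zhouqu County study above: min-max-normalize the wetness, greenness, dryness, and heat indicators, couple them with principal component analysis, and rescale the first component to [0, 1]. Synthetic arrays stand in for per-pixel Landsat-derived indicators, and the sign convention is an assumption; the real workflow runs on Google Earth Engine imagery.

import numpy as np
from sklearn.decomposition import PCA

rng = np.random.default_rng(0)
# Synthetic stand-ins for per-pixel wetness, greenness, dryness, heat indicators
n_pixels = 10_000
indicators = rng.random((n_pixels, 4))

# Min-max normalize each indicator so units are comparable
mins, maxs = indicators.min(axis=0), indicators.max(axis=0)
scaled = (indicators - mins) / (maxs - mins)

# The first principal component couples the four indicators
pca = PCA(n_components=1)
pc1 = pca.fit_transform(scaled).ravel()

# Rescale PC1 to [0, 1]; by convention, higher RSEI = better ecological quality
rsei = (pc1 - pc1.min()) / (pc1.max() - pc1.min())
print(f"explained variance of PC1: {pca.explained_variance_ratio_[0]:.2f}")
print(f"mean RSEI: {rsei.mean():.3f}")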
Finally, NRRI is related to a process-based variable, low flow exceedance (LFE), to comprehend the dominating system dynamics and evolutionary adjustments. The results highlight the existence of both threshold-modulated and filter-dominated systems based on CIE and NRRI variabilities. In addition, the gradual decline in CIE and subsequent stabilization of vegetated landforms can develop an 'event-driven' state, where floods exceeding the low-flow channel have a direct impact on the river recovery trajectory. Finally, this study emphasizes the presence of instream vegetation as an additional degree of freedom, which further controls the hierarchy of energy dissipation and morphological continuum in the macrochannel settings.}, } @article {pmid35721670, year = {2022}, author = {Bamasag, O and Alsaeedi, A and Munshi, A and Alghazzawi, D and Alshehri, S and Jamjoom, A}, title = {Real-time DDoS flood attack monitoring and detection (RT-AMD) model for cloud computing.}, journal = {PeerJ. Computer science}, volume = {7}, number = {}, pages = {e814}, pmid = {35721670}, issn = {2376-5992}, abstract = {In recent years, the advent of cloud computing has transformed the field of computing and information technology. It has been enabling customers to rent virtual resources and take advantage of various on-demand services with the lowest costs. Despite the advantages of cloud computing, it faces several threats; an example is a distributed denial of service (DDoS) attack, which is considered among the most serious. This article presents real-time monitoring and detection of DDoS attacks on the cloud using a machine learning approach. Naïve Bayes, K-nearest neighbor, decision tree, and random forest machine learning classifiers have been selected to build a predictive model named "Real-Time DDoS flood Attack Monitoring and Detection (RT-AMD)." The DDoS-2020 dataset was constructed with 70,020 records to evaluate RT-AMD's accuracy. The DDoS-2020 dataset covers three network/transport-level protocols: TCP, DNS, and ICMP. This article evaluates the proposed model by comparing its accuracy with related works. Our model has shown improvement in the results and reached real-time attack detection using incremental learning. The model achieved 99.38% accuracy for the random forest in real time in the cloud environment and 99.39% on local testing. The RT-AMD was evaluated on the NSL-KDD dataset as well, in which it achieved 99.30% accuracy in real time in a cloud environment.}, } @article {pmid35721407, year = {2022}, author = {Osmanoglu, M and Demir, S and Tugrul, B}, title = {Privacy-preserving k-NN interpolation over two encrypted databases.}, journal = {PeerJ. Computer science}, volume = {8}, number = {}, pages = {e965}, pmid = {35721407}, issn = {2376-5992}, abstract = {Cloud computing enables users to outsource their databases and the computing functionalities to a cloud service provider to avoid the cost of maintaining private storage and meeting computational requirements. It also provides universal access to data, applications, and services without location dependency. While cloud computing provides many benefits, it raises a number of security and privacy concerns. Outsourcing data to a cloud service provider in encrypted form may help to overcome these concerns. However, encrypted data makes it difficult for cloud service providers to perform certain operations over the data, especially those required in query processing tasks.
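Editor's note: the RT-AMD entry above compares Naïve Bayes, k-nearest neighbor, decision tree, and random forest classifiers; a compact sketch of that comparison is shown below. Since DDoS-2020 is not bundled here, synthetic flow features stand in for the labeled TCP/DNS/ICMP records.

from sklearn.datasets import make_classification
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import GaussianNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier

# Placeholder for labeled network-flow records (attack vs. benign)
X, y = make_classification(n_samples=5000, n_features=12, random_state=42)
X_tr, X_te, y_tr, y_te = train_test_split(X, y, test_size=0.3, random_state=42)

models = {
    "naive_bayes": GaussianNB(),
    "knn": KNeighborsClassifier(n_neighbors=5),
    "decision_tree": DecisionTreeClassifier(random_state=42),
    "random_forest": RandomForestClassifier(n_estimators=100, random_state=42),
}
for name, model in models.items():
    model.fit(X_tr, y_tr)
    print(name, accuracy_score(y_te, model.predict(X_te)))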
Among the techniques employed in the query processing task, the k-nearest neighbor method draws attention due to its simplicity and efficiency, particularly on massive data sets. A number of k-nearest neighbor algorithms for the query processing task on a single encrypted database have been proposed. However, the performance of k-nearest neighbor algorithms on a single database may create accuracy and reliability problems. It is a fact that collaboration among different cloud service providers yields more accurate and more reliable results in query processing. By considering this fact, we focus on the k-nearest neighbor (k-NN) problem over two encrypted databases. We introduce a secure two-party k-NN interpolation protocol that enables a query owner to extract the interpolation of the k-nearest neighbors of a query point from two different databases outsourced to two different cloud service providers. We also show that our protocol protects the confidentiality of the data and the query point, and hides data access patterns. Furthermore, we conducted a number of experiments to demonstrate the efficiency of our protocol. The results show that the running time of our protocol is linearly dependent on both the number of nearest neighbors and the data size.}, } @article {pmid35720928, year = {2022}, author = {Yuan, G and Xie, F and Tan, H}, title = {Construction of Economic Security Early Warning System Based on Cloud Computing and Data Mining.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {2080840}, pmid = {35720928}, issn = {1687-5273}, mesh = {Algorithms ; *Cloud Computing ; Data Mining ; Forecasting ; *Models, Theoretical ; }, abstract = {Economic security is a core theoretical issue in economics. In modern economic conditions, the ups and downs caused by economic instability in any economic system will affect the stability of the financial market, bring huge losses to the economy, and affect the development of the whole national economy. Therefore, research on the regularity of economic security and economic fluctuations is one of the important contents to ensure economic stability and scientific development. Accurate monitoring and forecasting of economic security are an indispensable link in economic system regulation, and an important reference factor for any economic organization making decisions. This article focuses on the construction of an economic security early warning system as its main research content. It integrates cloud computing and data mining technologies, supported by a CNN-SVM algorithm, to design an early warning model that can adaptively evaluate and warn of the economic security state. Experiments show that when the CNN in the model uses the ReLU activation function and the SVM uses an RBF kernel, the prediction accuracy can reach 0.98, and the prediction effect is the best. The model is verified on the dataset, and the output comprehensive economic security early warning index for Q province in 2018 is 0.893.
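Editor's note: setting the cryptography aside, the plaintext computation at the core of the k-NN interpolation protocol above looks roughly like the sketch below. Inverse-distance weighting is our assumed interpolation rule; in the actual protocol the distances and the average are computed jointly over two encrypted databases.

import numpy as np

def knn_interpolate(query, points, values, k=3, eps=1e-9):
    """Interpolate a value at `query` from its k nearest neighbors using
    inverse-distance weights (plaintext stand-in for the encrypted protocol)."""
    dists = np.linalg.norm(points - query, axis=1)
    nearest = np.argsort(dists)[:k]            # indices of the k closest points
    weights = 1.0 / (dists[nearest] + eps)     # closer points weigh more
    return np.average(values[nearest], weights=weights)

points = np.array([[0.0, 0.0], [1.0, 0.0], [0.0, 1.0], [2.0, 2.0]])
values = np.array([10.0, 20.0, 30.0, 40.0])
print(knn_interpolate(np.array([0.4, 0.4]), points, values, k=3))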
The 2019 economic security early warning index is 0.829, which is consistent with the actual situation.}, } @article {pmid35720893, year = {2022}, author = {Yin, X and He, J}, title = {Construction of Tourism E-Commerce Platform Based on Artificial Intelligence Algorithm.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {5558011}, pmid = {35720893}, issn = {1687-5273}, mesh = {*Artificial Intelligence ; Commerce ; Data Analysis ; Humans ; *Tourism ; Travel ; }, abstract = {In the late twentieth century, with the rapid development of the Internet, e-commerce emerged rapidly, changing the way people travel around the world. The greatest advantages of e-commerce are the free flow of information and data and the freedom it gives travelers to experience new places. Tourism is an important part of the development of e-commerce, but tourism e-commerce has lagged behind. To address this lag, this article studies the construction of a tourism e-commerce platform based on an artificial intelligence algorithm. By introducing modern information technology based on a cloud computing platform, big data analysis, K-means, and other key technologies, this article addresses the current state of e-commerce platform development. It also compares the construction methods of traditional and modern cloud platforms and identifies construction methods suitable for artificial intelligence tourism. At the same time, combined with the actual situation of tourism, the article selects an appropriate networking method, based on an analysis of the advantages, disadvantages, and economics of wired and wireless coverage, to complete the project design. Its purpose is to ensure that the work meets the specific construction needs and to build an artificial intelligence-based smart tourism big data analysis model. This promotes the development of the tourism e-commerce industry and saves costs and improves efficiency for travel service providers. Then, according to the actual situation of tourism, the article conducts a demand analysis from the perspectives of tourists, scenic spots, service providers, tourism administrative agencies, etc. Experiments with the practical application of the artificial intelligence tourism mobile e-commerce platform show that the platform designed in this article can meet customers' needs for shopping-related tourism commodities. Visitors to attractions increased by 3.54%, and the economy of tourist destinations grew by 4.2%.}, } @article {pmid35720617, year = {2022}, author = {Cheng, W and Lian, W and Tian, J}, title = {Building the hospital intelligent twins for all-scenario intelligence health care.}, journal = {Digital health}, volume = {8}, number = {}, pages = {20552076221107894}, pmid = {35720617}, issn = {2055-2076}, abstract = {The COVID-19 pandemic has accelerated a long-term trend of smart hospital development. However, there is no consistent conceptualization of what a smart hospital entails. Few hospitals have genuinely become "smart," primarily because they fail to bring systems together and to consider implications from all perspectives.
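Editor's note: in the economic-security model above, CNN-extracted features feed an SVM with an RBF kernel. The sketch below shows only the SVM stage on stand-in feature vectors (synthetic here, assumed to be activations exported from a trained CNN); it is a schematic of the CNN-SVM hybrid, not the paper's code.

import numpy as np
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC

# Assume `features` are CNN activations for monthly economic-indicator windows
# (synthetic here) and `labels` mark safe (0) vs. at-risk (1) periods.
rng = np.random.default_rng(1)
features = rng.normal(size=(600, 64))
labels = (features[:, :8].sum(axis=1) > 0).astype(int)

X_tr, X_te, y_tr, y_te = train_test_split(features, labels, test_size=0.25,
                                          random_state=1)
clf = SVC(kernel="rbf", C=1.0, gamma="scale")  # RBF kernel, as in the abstract
clf.fit(X_tr, y_tr)
print("accuracy:", accuracy_score(y_te, clf.predict(X_te)))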
Hospital Intelligent Twins are a new technology integration, powered by IoT, AI, cloud computing, and 5G, that creates all-scenario intelligence for health care and hospital management. This communication presents a smart hospital for all-scenario intelligence by creating hospital Intelligent Twins. Intelligent Twins are widely involved in medical activities. However, resolving the medical ethics issues, protecting patient privacy, and reducing the security risks involved are significant challenges for all-scenario intelligence applications. Creating hospital Intelligent Twins can be a worthwhile endeavor for assessing how to better inform evidence-based decision-making and enhance patient satisfaction and outcomes.}, } @article {pmid35713563, year = {2023}, author = {Chen, X and Xue, Y and Sun, Y and Shen, J and Song, S and Zhu, M and Song, Z and Cheng, Z and Zhou, P}, title = {Neuromorphic Photonic Memory Devices Using Ultrafast, Non-Volatile Phase-Change Materials.}, journal = {Advanced materials (Deerfield Beach, Fla.)}, volume = {35}, number = {37}, pages = {e2203909}, doi = {10.1002/adma.202203909}, pmid = {35713563}, issn = {1521-4095}, support = {2020YFA0308800//National Key Research and Development Program of China/ ; 62074042//National Natural Science Foundation of China/ ; 20501130100//Science and Technology Commission of Shanghai Municipality/ ; //Young Scientist Project of MOE Innovation Platform/ ; //Fudan University/ ; }, abstract = {The search for ultrafast photonic memory devices is inspired by the ever-increasing number of cloud-computing, supercomputing, and artificial-intelligence applications, together with the unique advantages of signal processing in the optical domain such as high speed, large bandwidth, and low energy consumption. By embracing silicon photonics with chalcogenide phase-change materials (PCMs), non-volatile integrated photonic memory is developed with promising potential in photonic integrated circuits and nanophotonic applications. While conventional PCMs suffer from slow crystallization speed, scandium-doped antimony telluride (SST) has been recently developed for ultrafast phase-change random-access memory applications. An ultrafast non-volatile photonic memory based on an SST thin film with a 2 ns write/erase speed is demonstrated, which is the fastest write/erase speed ever reported in integrated phase-change photonic devices. SST-based photonic memories exhibit multilevel capabilities and good stability at room temperature. By mapping the memory level to the biological synapse weight, an artificial neural network based on photonic memory devices is successfully established for image classification. Additionally, a reflective nanodisplay application using SST with optoelectronic modulation capabilities is demonstrated.
Both the optical and electrical changes in SST during the phase transition and the fast switching speed demonstrate their potential for use in photonic computing, neuromorphic computing, nanophotonics, and optoelectronic applications.}, } @article {pmid35712069, year = {2022}, author = {Hassan, J and Shehzad, D and Habib, U and Aftab, MU and Ahmad, M and Kuleev, R and Mazzara, M}, title = {The Rise of Cloud Computing: Data Protection, Privacy, and Open Research Challenges-A Systematic Literature Review (SLR).}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {8303504}, pmid = {35712069}, issn = {1687-5273}, mesh = {*Cloud Computing ; Computer Security ; Confidentiality ; Delivery of Health Care ; *Privacy ; }, abstract = {Cloud computing is a long-standing dream of computing as a utility, where users can store their data remotely in the cloud to enjoy on-demand services and high-quality applications from a shared pool of configurable computing resources. Thus, the privacy and security of data are of utmost importance to all of its users regardless of the nature of the data being stored. In cloud computing environments, it is especially critical because data is stored in various locations, even around the world, and users do not have any physical access to their sensitive data. Therefore, we need certain data protection techniques to protect the sensitive data that is outsourced over the cloud. In this paper, we conduct a systematic literature review (SLR) to illustrate all the data protection techniques that protect sensitive data outsourced over cloud storage. Therefore, the main objective of this research is to synthesize, classify, and identify important studies in the field of study. Accordingly, an evidence-based approach is used in this study. Preliminary results are based on answers to four research questions. Out of 493 research articles, 52 studies were selected. These 52 papers use different data protection techniques, which can be divided into two main categories, namely noncryptographic techniques and cryptographic techniques. Noncryptographic techniques consist of data splitting, data anonymization, and steganographic techniques, whereas cryptographic techniques consist of encryption, searchable encryption, homomorphic encryption, and signcryption. In this work, we compare all of these techniques in terms of data protection accuracy, overhead, and operations on masked data. Finally, we discuss the future research challenges facing the implementation of these techniques.}, } @article {pmid35712065, year = {2022}, author = {Chen, M}, title = {Integration and Optimization of British and American Literature Information Resources in the Distributed Cloud Computing Environment.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {4318962}, pmid = {35712065}, issn = {1687-5273}, mesh = {*Algorithms ; *Cloud Computing ; Models, Theoretical ; Publications ; United States ; }, abstract = {Integrating resources is one of the most effective approaches to improving resource usage efficiency and the degree of resource aggregation. Many studies on the integration of information resources are available; search engines are the most well-known examples. At the same time, this article intends to optimize the integration of British and American literature information resources by employing distributed cloud computing, based on the needs of British and American literature.
This research develops a model for the distributed nature of cloud computing and optimizes it by fitting a mathematical model of transmission cost and latency. This article analyzes the weaknesses of current British and American literature information resource integration and addresses them for the integration of British and American literature resources. According to this paper's experiments, the Random algorithm has the longest delay (maximum user-weighted distance). The algorithms NPA-PDP and BWF have longer delays than the algorithm Opt. The percentage decline varies between 0.17 percent and 1.11 percent for the different algorithms. This demonstrates that the algorithm presented in this work can be used to integrate and maximize information resources from English and American literature.}, } @article {pmid35707200, year = {2022}, author = {Chen, Y and Zhou, W}, title = {Application of Network Information Technology in Physical Education and Training System under the Background of Big Data.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {3081523}, pmid = {35707200}, issn = {1687-5273}, mesh = {Big Data ; Humans ; *Information Technology ; *Physical Education and Training ; Students ; Universities ; }, abstract = {During the last two decades, rapid development in network technology has been observed, particularly in hardware, and the development of software technology has accelerated, resulting in the launch of a variety of novel products with a wide range of applications. Traditional sports training systems, on the other hand, have a single function and a complex operation that cannot be fully implemented in colleges and universities, causing China's sports training to stagnate for a long time. The goal of physical education and training is to teach a specific action so that it attains its maximum potential in a variety of ways. As a result, we should use the system to collect scientifically sound and trustworthy data to aid relevant staff in completing their training tasks. Therefore, in the context of big data, network information technology has become the main way to improve the physical education system. By applying cloud computing, machine vision, and 64-bit machine technology to the physical education training system, we extract the video data of the physical education system, design the system's video teaching process, and construct a three-dimensional human model, so as to analyze the training situation of the trainees. In this paper, 30 basketball majors in a university are selected as the professional group and 30 computer majors as the control group. The average reaction time, scores, and expert scores of the two groups are analyzed. The results show that the test scores of the professional group are significantly higher than those of the control group. At the same time, the feedback results of students using the physical education and training system and normal physical education teaching and training are compared and analyzed.
One week later, the students trained with the physical education system had improved their thinking ability, movement accuracy, and judgment, indicating that the physical education training system performs well in practice.}, } @article {pmid35700763, year = {2022}, author = {Cheah, CG and Chia, WY and Lai, SF and Chew, KW and Chia, SR and Show, PL}, title = {Innovation designs of industry 4.0 based solid waste management: Machinery and digital circular economy.}, journal = {Environmental research}, volume = {213}, number = {}, pages = {113619}, doi = {10.1016/j.envres.2022.113619}, pmid = {35700763}, issn = {1096-0953}, mesh = {Artificial Intelligence ; Humans ; Industry ; Machine Learning ; *Solid Waste/analysis ; *Waste Management ; }, abstract = {The Industrial Revolution 4.0 (IR 4.0) holds the opportunity to improve the efficiency of managing solid waste through digital and machinery applications, effectively eliminating, recovering, and repurposing waste. This research aims to discover and review the potential of current technologies encompassing innovative Industry 4.0 designs for solid waste management. Machinery and processes emphasizing the circular economy were summarized and evaluated. The application of IR 4.0 technologies shows promising opportunities for improving the management and efficiency of solid waste handling. Machine learning (ML), artificial intelligence (AI), and image recognition can be used to automate the segregation of waste, reducing the risk of exposing labour workers to harmful waste. Radio Frequency Identification (RFID) and wireless communications enable traceability of materials to better understand the opportunities in the circular economy. Additionally, the interconnectivity of systems and automatic transfer of data enable the creation of more complex systems that house a larger solution space than was previously possible, such as centralised cloud computing that reduces cost by eliminating the need for individual computing systems. Through this comprehensive review-based work, innovative Industry 4.0 components of machinery and processes involving waste management which focus on the circular economy are identified, with the critical ones evaluated briefly. It was found that current research and work is based on applying Industry 4.0 technologies to individual waste management systems, which lacks the coherency needed to capitalise on technologies such as cloud computing, interconnectivity, big data, etc. on a larger scale. Therefore, a real-world, comprehensive end-to-end integration aimed at optimizing every process within the solid waste management chain should be explored.}, } @article {pmid35693529, year = {2022}, author = {Zhao, Y and Du, D}, title = {Research Orientation and Development of Social Psychology's Concept of Justice in the Era of Cloud Computing.}, journal = {Frontiers in psychology}, volume = {13}, number = {}, pages = {902780}, pmid = {35693529}, issn = {1664-1078}, abstract = {With the maturity and rapid expansion of social psychology, great progress has been made in the integration of social psychology with other disciplines. From the very beginning, social psychology was destined to have a diversified and multidisciplinary research orientation and disciplinary nature, which also makes it difficult for social psychology to be defined within a single disciplinary field and a single research method.
With the rapid development of the Internet, the emergence of cloud computing technology not only facilitates the orientation of psychological research, but also promotes the emergence and development of some new psychological disciplines. Therefore, the purpose of this paper is to study the orientation of social psychology and its current development in the context of the cloud computing era. This paper collects, organizes, and integrates research data on college students' view of justice from the perspective of social psychology through cloud computing technology, and uses empirical research methods to conduct in-depth research on people's view of justice in social psychology. This paper collects the data reports of college students on social justice issues through cloud computing technology to make the results more accurate. The experimental results show that nearly 70% of college students pay close attention to social justice issues. This datum clearly reflects the optimistic trend of people's attention to justice issues in social psychology.}, } @article {pmid35687631, year = {2023}, author = {Chu, Z and Guo, J and Guo, J}, title = {Up-Conversion Luminescence System for Quantitative Detection of IL-6.}, journal = {IEEE transactions on nanobioscience}, volume = {22}, number = {2}, pages = {203-211}, doi = {10.1109/TNB.2022.3178754}, pmid = {35687631}, issn = {1558-2639}, mesh = {Humans ; Algorithms ; *COVID-19/diagnosis ; *Interleukin-6 ; Luminescence ; Software ; }, abstract = {Interleukin-6 (IL-6) is a very important cytokine and an early predictor of survival in febrile patients (e.g., patients with COVID-19). With the worldwide outbreak of COVID-19, the significance of the medical detection of interleukin-6 has become prominent. A method for point-of-care (POCT) diagnosis and monitoring of IL-6 levels in patients is urgently needed. In this work, an up-conversion luminescence system (ULS) based on upconverting nanoparticles (UCNs) for quantitative detection of IL-6 was designed. The ULS consists of a microcontroller unit (MCU), a transmission device, a laser, an image acquisition module, a Bluetooth module, etc. Through hardware system acquisition and image software algorithm processing, we obtain a limit of detection (LOD) of IL-6 at 1 ng/mL, and the quantitative range is from 1 to 200 ng/mL. The system is handheld and has great detection accuracy. The detection time is 10 minutes. In addition, the system can access mobile device terminals (smartphones, personal computers, etc.) or 5G cloud servers via Bluetooth and Wi-Fi. Patients and family members can view medical data through mobile terminals, and the data stored in the 5G cloud server can be used for edge computing and big data analysis. It is suitable for the early diagnosis of infectious diseases such as COVID-19 and has good application prospects.}, } @article {pmid35687417, year = {2022}, author = {Ito, H and Nakamura, Y and Takanari, K and Oishi, M and Matsuo, K and Kanbe, M and Uchibori, T and Ebisawa, K and Kamei, Y}, title = {Development of a Novel Scar Screening System with Machine Learning.}, journal = {Plastic and reconstructive surgery}, volume = {150}, number = {2}, pages = {465e-472e}, doi = {10.1097/PRS.0000000000009312}, pmid = {35687417}, issn = {1529-4242}, mesh = {Algorithms ; *Cicatrix, Hypertrophic/diagnosis/etiology ; Humans ; *Keloid/drug therapy ; Machine Learning ; }, abstract = {BACKGROUND: Hypertrophic scars and keloids tend to cause serious functional and cosmetic impediments to patients.
As these scars are not life-threatening, many patients do not seek proper treatment. Thus, educating physicians and patients regarding these scars is important. The authors aimed to develop an algorithm for a scar screening system and compare the accuracy of the system with that of physicians. This algorithm was designed to involve health care providers and patients.

METHODS: Digital images were obtained from Google Images (Google LLC, Mountain View, Calif.), open access repositories, and patients in the authors' hospital. After preprocessing, 3768 images were uploaded to the Google Cloud AutoML Vision platform and labeled with one of the four diagnoses: immature scars, mature scars, hypertrophic scars, and keloid. A consensus label for each image was compared with the label provided by physicians.

RESULTS: For all diagnoses, the average precision (positive predictive value) of the algorithm was 80.7 percent, the average recall (sensitivity) was 71 percent, and the area under the curve was 0.846. The algorithm afforded 77 correct diagnoses with an accuracy of 77 percent. Conversely, the average physician accuracy was 68.7 percent. The Cohen kappa coefficient of the algorithm was 0.69, while that of the physicians was 0.59.
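Editor's note: the precision, recall, accuracy, and kappa figures reported in the RESULTS paragraph above can be computed for any labeled evaluation set with standard scikit-learn calls; a sketch with invented toy scar labels follows.

from sklearn.metrics import (accuracy_score, cohen_kappa_score,
                             precision_score, recall_score)

# Toy ground-truth and predicted labels:
# 0 = immature, 1 = mature, 2 = hypertrophic, 3 = keloid
y_true = [0, 0, 1, 1, 2, 2, 3, 3, 2, 1]
y_pred = [0, 1, 1, 1, 2, 3, 3, 3, 2, 1]

print("accuracy :", accuracy_score(y_true, y_pred))
print("precision:", precision_score(y_true, y_pred, average="macro"))
print("recall   :", recall_score(y_true, y_pred, average="macro"))
print("kappa    :", cohen_kappa_score(y_true, y_pred))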

CONCLUSIONS: The authors developed a computer vision algorithm that can diagnose four scar types using automated machine learning. Future iterations of this algorithm, with more comprehensive accuracy, can be embedded in telehealth and digital imaging platforms used by patients and primary doctors. The scar screening system with machine learning may be a valuable support tool for physicians and patients.

Diagnostic, II.}, } @article {pmid35684889, year = {2022}, author = {Hanzelik, PP and Kummer, A and Abonyi, J}, title = {Edge-Computing and Machine-Learning-Based Framework for Software Sensor Development.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {11}, pages = {}, pmid = {35684889}, issn = {1424-8220}, mesh = {*Artificial Intelligence ; Cloud Computing ; *Internet of Things ; Machine Learning ; Software ; }, abstract = {This research presents a framework that supports the development and operation of machine-learning (ML) algorithms to develop, maintain and manage the whole lifecycle of modeling software sensors related to complex chemical processes. Our motivation is to take advantage of ML and edge computing and offer innovative solutions to the chemical industry for difficult-to-measure laboratory variables. The purpose of software sensor models is to continuously forecast the quality of products to achieve effective quality control, maintain the stable production condition of plants, and support efficient, environmentally friendly, and harmless laboratory work. As a result of the literature review, quite a few ML models have been developed in recent years that support the quality assurance of different types of materials. However, the problems of continuous operation, maintenance and version control of these models have not yet been solved. The method uses ML algorithms and takes advantage of cloud services in an enterprise environment. Industry 4.0 technologies such as the Internet of Things (IoT), edge computing, cloud computing, ML, and artificial intelligence (AI) are core techniques. The article outlines an information system structure and the related methodology based on data from a quality-assurance laboratory. During the development, we encountered several challenges resulting from the continuous development of ML models and the tuning of their parameters. The article discusses the development, version control, validation, lifecycle, and maintenance of ML models and a case study. The developed framework can continuously monitor the performance of the models and increase the amount of data that make up the models. As a result, the most accurate, data-driven and up-to-date models are always available to quality-assurance engineers with this solution.}, } @article {pmid35684844, year = {2022}, author = {Lin, HY and Tsai, TT and Ting, PY and Chen, CC}, title = {An Improved ID-Based Data Storage Scheme for Fog-Enabled IoT Environments.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {11}, pages = {}, pmid = {35684844}, issn = {1424-8220}, abstract = {In a fog-enabled IoT environment, a fog node is regarded as the proxy between end users and cloud servers to reduce the latency of data transmission, so as to fulfill the requirement of more real-time applications. A data storage scheme utilizing fog computing architecture allows a user to share cloud data with other users via the assistance of fog nodes. In particular, a fog node obtaining a re-encryption key of the data owner is able to convert a cloud ciphertext into one that is decryptable by another designated user. In such a scheme, a proxy should not learn any information about the plaintext during the transmission and re-encryption processes. In 2020, an ID-based data storage scheme utilizing anonymous key generation in fog computing was proposed by some researchers.
Although their protocol is provably secure in the random oracle model, we point out several security flaws inherent in their protocol. On the basis of their work, we further present an improved variant, which not only eliminates their security weaknesses, but also preserves the functionalities of anonymous key generation and the user revocation mechanism. Additionally, under the Decisional Bilinear Diffie-Hellman (DBDH) assumption, we demonstrate that our enhanced construction is also provably secure in the security notion of IND-PrID-CPA.}, } @article {pmid35684754, year = {2022}, author = {Bhatia, S and Alsuwailam, RI and Roy, DG and Mashat, A}, title = {Improved Multimedia Object Processing for the Internet of Vehicles.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {11}, pages = {}, pmid = {35684754}, issn = {1424-8220}, support = {AN000533//King Faisal University/ ; }, mesh = {Algorithms ; Automation ; *Internet of Things ; *Multimedia ; }, abstract = {The combination of edge computing and deep learning helps make intelligent edge devices that can make several conditional decisions using comparatively secure and fast machine learning algorithms. An automated car that acts as the data-source node of an intelligent Internet of Vehicles (IoV) system is one of these examples. Our motivation is to obtain more accurate and rapid object detection using the intelligent cameras of a smart car. The smart automobile model's supervision camera utilizes multimedia data for real-time automation of threat detection. The corresponding comprehensive network combines cooperative multimedia data processing, Internet of Things (IoT) fact handling, validation, computation, precise detection, and decision making. These actions confront real-time delays during data offloading to the cloud and synchronizing with the other nodes. The proposed model follows a cooperative machine learning technique, distributes the computational load by slicing real-time object data among analogous intelligent Internet of Things nodes, and parallelizes vision processing between connected edge clusters. As a result, the system increases the computational rate and improves accuracy through responsible resource utilization and active-passive learning. We achieved low latency and higher accuracy for object identification through real-time multimedia data objectification.}, } @article {pmid35684631, year = {2022}, author = {Jiao, Z and Zhou, F and Wang, Q and Sun, J}, title = {RPVC: A Revocable Publicly Verifiable Computation Solution for Edge Computing.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {11}, pages = {}, pmid = {35684631}, issn = {1424-8220}, support = {62072090, 62173101, and 61902057//National Natural Science Foundation of China/ ; }, mesh = {Algorithms ; *Cloud Computing ; }, abstract = {With the development of publicly verifiable computation (PVC), users with limited resources prefer to outsource computing tasks to cloud servers. However, existing PVC schemes are mainly proposed for cloud computing scenarios, which brings bandwidth consumption or network delay to IoT devices in edge computing. In addition, dishonest edge servers may reduce resource utilization by returning unreliable results. Therefore, we propose a revocable publicly verifiable computation (RPVC) scheme for edge computing. On the one hand, RPVC ensures that users can verify the correct results at a small cost.
On the other hand, it can revoke the computing abilities of dishonest edge servers. First, polynomial commitments are employed to reduce the length of proofs and their generation time. Then, we improve the revocable group signature using knowledge signatures and subset-covering theory. This makes it possible to revoke dishonest edge servers. Finally, theoretical analysis proves that RPVC is correct and secure, and experiments evaluate the efficiency of RPVC.}, } @article {pmid35677770, year = {2022}, author = {Loo, WK and Hasikin, K and Suhaimi, A and Yee, PL and Teo, K and Xia, K and Qian, P and Jiang, Y and Zhang, Y and Dhanalakshmi, S and Azizan, MM and Lai, KW}, title = {Systematic Review on COVID-19 Readmission and Risk Factors: Future of Machine Learning in COVID-19 Readmission Studies.}, journal = {Frontiers in public health}, volume = {10}, number = {}, pages = {898254}, pmid = {35677770}, issn = {2296-2565}, mesh = {*COVID-19/epidemiology ; Humans ; Logistic Models ; Machine Learning ; *Patient Readmission ; Risk Factors ; United States ; }, abstract = {In this review, current studies on hospital readmission due to COVID-19 infection were discussed, compared, and further evaluated in order to understand the current trends and progress in the mitigation of hospital readmissions due to COVID-19. The Boolean expression ("COVID-19" OR "covid19" OR "covid" OR "coronavirus" OR "Sars-CoV-2") AND ("readmission" OR "re-admission" OR "rehospitalization" OR "re-hospitalization") was used in five databases, namely Web of Science, Medline, Science Direct, Google Scholar and Scopus. From the search, a total of 253 articles were screened down to 26 articles. Overall, most of the research focuses on readmission rates rather than mortality rates. The lowest reported readmission rate is 4.2%, by Ramos-Martínez et al. from Spain, and the highest is 19.9%, by Donnelly et al. from the United States. Most of the studies (n = 13) use an inferential statistical approach, while only one uses a machine learning approach. The data size ranges from 79 to 126,137. However, there is no specific guide for setting the most suitable data size for a study, and the results cannot be compared in terms of accuracy, as all of the research consists of regional studies that do not involve multi-region data. Logistic regression is prevalent in research on risk factors for readmission after a COVID-19 admission, although each study reports different outcomes. From the word cloud, age is the most dominant risk factor for readmission, followed by diabetes, high length of stay, COPD, CKD, liver disease, metastatic disease, and CAD. A few future research directions have been proposed, including the utilization of machine learning in statistical analysis, investigation of dominant risk factors, experimental design of interventions to curb dominant risk factors, and scaling up data collection from single-centered to multi-centered.}, } @article {pmid35677629, year = {2022}, author = {Ghosh, S and Mukherjee, A}, title = {STROVE: spatial data infrastructure enabled cloud-fog-edge computing framework for combating COVID-19 pandemic.}, journal = {Innovations in systems and software engineering}, volume = {}, number = {}, pages = {1-17}, pmid = {35677629}, issn = {1614-5046}, abstract = {The outbreak of 2019 novel coronavirus (COVID-19) has triggered unprecedented challenges and put the whole world in a parlous condition.
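Editor's note: logistic regression, which the readmission review above identifies as the prevalent analysis for readmission risk factors, reduces to a few lines; the covariates below (age, diabetes, length of stay) and the synthetic outcome are purely illustrative, not data from any reviewed study.

import numpy as np
from sklearn.linear_model import LogisticRegression

rng = np.random.default_rng(7)
n = 1000
age = rng.integers(20, 90, n)
diabetes = rng.integers(0, 2, n)
length_of_stay = rng.integers(1, 30, n)

# Synthetic readmission outcome loosely driven by the three covariates
logit = 0.04 * (age - 55) + 0.8 * diabetes + 0.05 * (length_of_stay - 7) - 1.0
readmitted = (rng.random(n) < 1 / (1 + np.exp(-logit))).astype(int)

X = np.column_stack([age, diabetes, length_of_stay])
model = LogisticRegression().fit(X, readmitted)

# Odds ratio per covariate: exp(coefficient)
for name, coef in zip(["age", "diabetes", "length_of_stay"], model.coef_[0]):
    print(f"{name}: odds ratio ~ {np.exp(coef):.2f}")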
The impact of COVID-19 is a matter of grave concern in terms of fatality rate, socio-economic conditions, and health infrastructure. It is obvious that pharmaceutical solutions (vaccines) alone cannot eradicate this pandemic completely, and effective strategies regarding lockdown measures, restricted mobility, and emergency services to users (in brief, a data-driven decision system) are of utmost importance. This necessitates an efficient data analytics framework, a data infrastructure to store and manage pandemic-related information, and a distributed computing platform to support such data-driven operations. In the past few decades, Internet of Things-based devices and applications have emerged significantly in various sectors, including healthcare and time-critical applications. To be specific, health sensors accumulate health-related parameters at different times of the day, and movement sensors keep track of the user's mobility traces and help assist them in varied conditions. Smartphones are equipped with several such sensors, and the ability of low-cost connected sensors to cover large areas makes them the most useful component for combating pandemics such as COVID-19. However, analysing and managing the huge amount of data generated by these sensors is a big challenge. In this paper we have proposed a unified framework which has three major components: (i) Spatial Data Infrastructure to manage, store, analyse and share spatio-temporal information with stakeholders efficiently, (ii) Cloud-Fog-Edge-based hierarchical architecture to support preliminary diagnosis and monitoring of patients' mobility, health parameters and activities while they are in quarantine or home-based treatment, and (iii) assisting users in varied emergency situations by leveraging efficient data-driven techniques at low latency and energy consumption. The mobility data analytics along with SDI is required to interpret the movement dynamics of the region and correlate them with COVID-19 hotspots. Further, the Cloud-Fog-Edge-based system architecture is required to provision healthcare services efficiently and in a timely manner. The proposed framework yields encouraging results in taking decisions based on the COVID-19 context and assisting users effectively, enhancing the accuracy of detecting suspected infected people by ∼ 24% and reducing delay by ∼ 55% compared to a cloud-only system.}, } @article {pmid35677197, year = {2022}, author = {Zhang, Y and Zhao, H and Peng, D}, title = {Exploration and Research on Smart Sports Classrooms in Colleges in the Information Age.}, journal = {Applied bionics and biomechanics}, volume = {2022}, number = {}, pages = {2970496}, pmid = {35677197}, issn = {1176-2322}, abstract = {Smart classrooms, made possible by the growing use of Internet information technology in education and one of the important foundations for realizing smart education, have become a hot direction in the development of educational information innovation; this study aims to propose ideas and directions for smart sports teaching research in information-age (IA) colleges and universities. The smart classroom is an intelligent and efficient classroom created by the "Internet +" way of thinking and the new generation of information technologies such as big data and cloud computing.
This article applies exploratory research methods to smart sports classrooms in colleges and universities in the IA, including document retrieval, expert interviews, questionnaire surveys, practical research, and field investigation. According to the findings of this study, 96.34 percent of students have a positive attitude toward the smart sports classroom teaching model, which is favorable to the growth of smart sports classroom teaching.}, } @article {pmid35676964, year = {2022}, author = {Nair, R and Zafrullah, SN and Vinayasree, P and Singh, P and Zahra, MMA and Sharma, T and Ahmadi, F}, title = {Blockchain-Based Decentralized Cloud Solutions for Data Transfer.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {8209854}, pmid = {35676964}, issn = {1687-5273}, mesh = {*Blockchain ; Cloud Computing ; Information Storage and Retrieval ; }, abstract = {Cloud computing has increased its service area and user experience above traditional platforms through virtualization and resource integration, resulting in substantial economic and societal advantages. Cloud computing is experiencing a significant security and trust dilemma, requiring a trust-enabled transaction environment. The typical cloud trust model is centralized, resulting in high maintenance costs, network congestion, and even single-point failure. Also, due to a lack of openness and traceability, trust rating findings are not universally acknowledged. Blockchain is a novel, decentralised computing system. Its unique operational principles and record traceability assure the transaction data's integrity, undeniability, and security. So, blockchain is ideal for building a distributed and decentralised trust infrastructure. This study addresses the difficulty of transferring data and related permission policies from the cloud to distributed file systems (DFS). Our aims include moving the data files from the cloud to the distributed file system and developing a cloud policy. In DFS, no node is given privilege, and storage of all the data is dependent on content-addressing. The data files are moved from Amazon S3 buckets to the InterPlanetary File System (IPFS).}, } @article {pmid35673063, year = {2022}, author = {Anderson, B and Cameron, J and Jefferson, U and Reeder, B}, title = {Designing a Cloud-Based System for Affordable Cyberinfrastructure to Support Software-Based Research.}, journal = {Studies in health technology and informatics}, volume = {290}, number = {}, pages = {489-493}, doi = {10.3233/SHTI220124}, pmid = {35673063}, issn = {1879-8365}, mesh = {*Cloud Computing ; Research ; *Software ; }, abstract = {Interest in cloud-based cyberinfrastructure among higher-education institutions is growing rapidly, driven by needs to realize cost savings and access enhanced computing resources. Through a nonprofit entity, we have created a platform that provides hosting and software support services enabling researchers to responsibly build on cloud technologies. However, there are technical, logistic, and administrative challenges if this platform is to support all types of research.
Software-enhanced research is distinctly different from industry applications, typically characterized by needs for reduced availability, greater flexibility, and fewer resources for upkeep costs. We describe a swarm environment specifically designed for research in academic settings and our experience developing an operating model for sustainable cyberinfrastructure. We also present three case studies illustrating the types of applications supported by the cyberinfrastructure and explore techniques that address specific application needs. Our findings demonstrate safer, faster, cheaper cloud services by recognizing the intrinsic properties of academic research environments.}, } @article {pmid35673000, year = {2022}, author = {Ruokolainen, J and Haladijan, J and Juutinen, M and Puustinen, J and Holm, A and Vehkaoja, A and Nieminen, H}, title = {Mobilemicroservices Architecture for Remote Monitoring of Patients: A Feasibility Study.}, journal = {Studies in health technology and informatics}, volume = {290}, number = {}, pages = {200-204}, doi = {10.3233/SHTI220061}, pmid = {35673000}, issn = {1879-8365}, mesh = {Cloud Computing ; Delivery of Health Care ; Feasibility Studies ; Humans ; Monitoring, Physiologic ; *Telemedicine ; }, abstract = {Recent developments in smart mobile devices (SMDs), wearable sensors, the Internet, mobile networks, and computing power provide new healthcare opportunities that are not restricted geographically. This paper aims to introduce Mobilemicroservices Architecture (MMA) based on a study of architectures. In MMA, an HTTP-based Mobilemicroservice (MM) is allocated to each SMD's sensor. The key benefits are extendibility, scalability, ease of use for the patient, security, and the ability to collect raw data without involving cloud services. Feasibility was investigated in a two-year project, where MMA-based solutions were used to collect motor function data from patients with Parkinson's disease. First, we collected motor function data from 98 patients and healthy controls during their visit to a clinic. Second, we monitored the same subjects in real time for three days in their everyday living environment. These MMA applications represent HTTP-based business-logic computing in which the SMDs' resources are accessible globally.}, } @article {pmid35669983, year = {2022}, author = {Khan, NJ and Ahamad, G and Naseem, M}, title = {An IoT/FOG based framework for sports talent identification in COVID-19 like situations.}, journal = {International journal of information technology : an official journal of Bharati Vidyapeeth's Institute of Computer Applications and Management}, volume = {14}, number = {5}, pages = {2513-2521}, pmid = {35669983}, issn = {2511-2112}, abstract = {COVID-19 crippled all the domains of our society. The inevitable lockdowns and social distancing procedures have hit the process of traditional sports talent identification (TiD) severely. This will interrupt the career excellence of athletes and will also affect the future talent in the years to come. We explore the effect of COVID-19 on sports talent identification and propose an IoT/Fog-based framework for the TiD process during COVID-19 and COVID-like situations. Our proposed novel six-layer model facilitates sports talent identification remotely, using the latest information and communication technologies like IoT, fog, and cloud computing.
The framework is mobile, widely accessible, scalable, cost-effective, secure, platform/location independent and fast. A brief case study of cricket talent identification using the proposed framework is also provided.}, } @article {pmid35669659, year = {2022}, author = {Li, K}, title = {Application of Artificial Intelligence System Based on Wireless Sensor Network in Enterprise Management.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {2169521}, pmid = {35669659}, issn = {1687-5273}, mesh = {Artificial Intelligence ; *Computer Communication Networks ; Remote Sensing Technology ; Technology ; *Wireless Technology ; }, abstract = {With the improvement of the ability to acquire natural information, wireless sensor networks also need to transmit corresponding information when collecting information. Wireless sensor nodes have great application prospects as a key component of wireless sensors. Therefore, different wireless sensors play an important, decisive role in the operation of wireless network applications. With the continuous development of wireless sensor networks, existing wireless sensor network nodes exhibit limitations and shortcomings such as inflexible structure, low variability, and low versatility. Specifically, the learning and neural networks obtained by different artificial intelligence expert systems in computing technology differ. On the one hand, they can meet the needs of users for information systems to a certain extent; on the other hand, they can also help accelerate the development of computer science. At present, the new generation of information technology industry is listed among the country's seven emerging strategic industries. The new cloud computing technology has gradually expanded to important corporate governance capabilities in terms of information technology. The intelligent application of cloud computing technology replaces traditional enterprise management technology. Efficiency management and risk management can improve the quality and business capabilities of the entire enterprise, improve system applications according to the actual situation of the enterprise, and support the healthy and sustainable development of the enterprise, thereby promoting the sustainable development of the computer technology industry.}, } @article {pmid35669657, year = {2022}, author = {Yang, M and Gao, C and Han, J}, title = {Edge Computing Deployment Algorithm and Sports Training Data Mining Based on Software Defined Network.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {8056360}, pmid = {35669657}, issn = {1687-5273}, mesh = {Algorithms ; *Cloud Computing ; *Data Mining ; Software ; Technology ; }, abstract = {The wireless sensor network collects data from various areas through specific network nodes and uploads it to the decision-making layer for analysis and processing. Therefore, it has become a perception network of the Internet of Things and has made great achievements in monitoring and prevention at this stage. At this stage, the main problem is powering sensor nodes, so energy storage and transmission in wireless sensor networks are pressing concerns. Mobile edge computing technology provides a new type of technology for today's edge networks, enabling them to process resource-intensive data blocks and give timely feedback to managers.
Compared with traditional cloud computing services, it is a new starting point: transmission is more efficient, and it will be widely used across various industries in the future. Among them, education and related industries urgently need in-depth information, which in turn promotes the rapid development of data mining by sensor networks. This article focuses on data mining technology, mainly expounds the meaning and main methods of data mining technology, and conducts data mining on sports training requirements from the aspects of demand collection and analysis, algorithm design and optimization, demand results and realization, etc., to monitor training status and give the trainer reasonable suggestions. By processing the training data mining results and proofreading them against standardized training data in the database, we can formulate a personalized program suitable for athletes, reduce sports injuries caused by training without a trainer's guidance, and open new doors for training modes. Therefore, this paper studies sensor network technology, the edge computing deployment algorithm, and sports training data mining.}, } @article {pmid35668959, year = {2022}, author = {Zhong, M and Ali, M and Faqir, K and Begum, S and Haider, B and Shahzad, K and Nosheen, N}, title = {China Pakistan Economic Corridor Digital Transformation.}, journal = {Frontiers in psychology}, volume = {13}, number = {}, pages = {887848}, pmid = {35668959}, issn = {1664-1078}, abstract = {The China-Pakistan Economic Corridor (CPEC) vision and mission are to improve the living standards of the people of Pakistan and China through bilateral investments, trade, cultural exchanges, and economic activities. To achieve this envisioned dream, Pakistan established the China-Pakistan Economic Corridor Authority (CPECA) to further its completion, but Covid-19 slowed it down. This situation compelled the digitalization of CPEC. This article reviews the best practices and success stories of various digitalization and e-governance programs and, in this light, advises the implementation of the Ajman Digital Governance (ADG) model as a theoretical framework for CPEC digitalization. This article concludes that the Pakistani government needs to transform CPEC digitalization by setting up the CPEC Digitalization and Transformation Center (DTC) at the CPECA office to attract more investors and businesses.}, } @article {pmid35668732, year = {2023}, author = {Butt, UA and Amin, R and Aldabbas, H and Mohan, S and Alouffi, B and Ahmadian, A}, title = {Cloud-based email phishing attack using machine and deep learning algorithm.}, journal = {Complex & intelligent systems}, volume = {9}, number = {3}, pages = {3043-3070}, pmid = {35668732}, issn = {2198-6053}, abstract = {Cloud computing refers to the on-demand availability of personal computer system assets, specifically data storage and processing power, without the client's input. Emails are commonly used to send and receive data for individuals or groups. Financial data, credit reports, and other sensitive data are often sent via the Internet. Phishing is a fraudster's technique used to get sensitive data from users by seeming to come from trusted sources. The sender can persuade you to give up secret data through misdirection in a phished email. The main problem is email phishing attacks while sending and receiving email. The attacker sends spam data using email and receives your data when you open and read the email.
In recent years, this has been a big problem for everyone. This paper uses different legitimate and phishing data sizes, detects new emails, and uses different features and algorithms for classification. A modified dataset is created after measuring the existing approaches. We created a feature-extracted comma-separated values (CSV) file and a label file, and applied the support vector machine (SVM), Naive Bayes (NB), and long short-term memory (LSTM) algorithms. This experimentation treats the recognition of a phished email as a classification issue. According to the comparison and implementation, SVM, NB and LSTM perform better and are more accurate at detecting email phishing attacks. The classification of email attacks using SVM, NB, and LSTM classifiers achieves the highest accuracies of 99.62%, 97% and 98%, respectively.}, } @article {pmid35665291, year = {2022}, author = {Kumar, RR and Tomar, A and Shameem, M and Alam, MN}, title = {OPTCLOUD: An Optimal Cloud Service Selection Framework Using QoS Correlation Lens.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {2019485}, pmid = {35665291}, issn = {1687-5273}, mesh = {*Algorithms ; *Cloud Computing ; }, abstract = {Cloud computing has grown as a computing paradigm in the last few years. Due to the explosive increase in the number of cloud services, QoS (quality of service) becomes an important factor in service filtering. Moreover, it becomes a nontrivial problem when comparing the functionality of cloud services with different performance metrics. Therefore, optimal cloud service selection is quite challenging and extremely important for users. In existing approaches to cloud service selection, preferences are offered by the user in a quantitative form. Given their fuzziness and subjectivity, it is hard for users to express clear preferences. Moreover, many QoS attributes are not independent but interrelated; therefore, the existing weighted summation method cannot accommodate correlations among QoS attributes and produces inaccurate results. To resolve this problem, we propose a cloud service framework that takes the user's preferences and chooses the optimal cloud service based on the user's QoS constraints. We propose a cloud service selection algorithm, based on principal component analysis (PCA) and the best-worst method (BWM), which eliminates the correlations between QoS attributes and provides the best cloud services with the best QoS values for users. In the end, a numerical example is shown to validate the effectiveness and feasibility of the proposed methodology.}, } @article {pmid35655579, year = {2022}, author = {Ma, S and Liu, ZP}, title = {Machine learning potential era of zeolite simulation.}, journal = {Chemical science}, volume = {13}, number = {18}, pages = {5055-5068}, pmid = {35655579}, issn = {2041-6520}, abstract = {Zeolites, owing to their great variety and complexity in structure and wide applications in chemistry, have long been a hot topic in chemical research. This perspective first presents a short retrospect of theoretical investigations on zeolites using tools from classical force fields to quantum mechanics calculations and to the latest machine learning (ML) potential simulations. ML potentials, as the next-generation technique for atomic simulation, open new avenues to simulate and interpret zeolite systems and thus hold great promise for finally predicting the structure-functionality relation of zeolites.
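For the cloud-based phishing study by Butt et al. (pmid 35668732) above, a minimal sketch of how such a feature-CSV classification experiment is commonly set up with scikit-learn follows. It is an illustration under assumptions: the file name email_features.csv, the label column, and the train/test split are invented, and this is not the authors' pipeline.

import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import accuracy_score

df = pd.read_csv("email_features.csv")   # hypothetical feature-extracted CSV
X = df.drop(columns=["label"]).values    # numeric features per email
y = df["label"].values                   # 1 = phishing, 0 = legitimate

X_tr, X_te, y_tr, y_te = train_test_split(X, y, test_size=0.2, random_state=42)

# Train and score two of the classifiers named in the abstract.
for name, clf in [("SVM", SVC(kernel="rbf")), ("NB", GaussianNB())]:
    clf.fit(X_tr, y_tr)
    print(name, "accuracy:", accuracy_score(y_te, clf.predict(X_te)))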
Recent advances using ML potentials are then summarized from two main aspects: the origin of zeolite stability and the mechanism of zeolite-related catalytic reactions. We also discuss possible scenarios of ML potential application aimed at providing instantaneous and easy access to zeolite properties. These advanced applications could now be accomplished by combining cloud-computing-based techniques with ML potential-based atomic simulations. The future development of ML potentials for zeolites, in terms of improving calculation accuracy, expanding the application scope, and constructing zeolite-related datasets, is finally outlined.}, } @article {pmid35651671, year = {2022}, author = {Francini, S and Chirici, G}, title = {A Sentinel-2 derived dataset of forest disturbances occurred in Italy between 2017 and 2020.}, journal = {Data in brief}, volume = {42}, number = {}, pages = {108297}, pmid = {35651671}, issn = {2352-3409}, abstract = {Forests absorb 30% of human emissions associated with fossil fuel burning. For this reason, forest disturbance monitoring is needed for assessing greenhouse gas balance. However, in several countries, the information regarding the spatio-temporal distribution of forest disturbances is missing. Remote sensing data, and the new Sentinel-2 satellite missions in particular, represent a game-changer in this topic. Here we provide a spatially explicit dataset (10-meter resolution) of Italian forest disturbances and magnitude from 2017 to 2020, constructed using Sentinel-2 level-1C imagery and exploiting the Google Earth Engine (GEE) implementation of the 3I3D algorithm. For each year between 2017 and 2020, we provide three datasets: (i) a magnitude-of-change map (between 0 and 255), (ii) a categorical map of forest disturbances, and (iii) a categorical map obtained by stratification of the previous maps that can be used to estimate the areas of several different forest disturbances. The data we provide represent the state of the art for Mediterranean ecosystems in terms of omission and commission errors; they support greenhouse gas balance, forest sustainability assessment, and decision-makers in forest management; they help forest companies monitor harvesting activity over space and time; and, supported by reference data, they can be used to obtain the national estimates of forest harvesting and disturbances that Italy is called upon to provide.}, } @article {pmid35649841, year = {2022}, author = {Sakshuwong, S and Weir, H and Raucci, U and Martínez, TJ}, title = {Bringing chemical structures to life with augmented reality, machine learning, and quantum chemistry.}, journal = {The Journal of chemical physics}, volume = {156}, number = {20}, pages = {204801}, doi = {10.1063/5.0090482}, pmid = {35649841}, issn = {1089-7690}, mesh = {*Augmented Reality ; Machine Learning ; Molecular Conformation ; }, abstract = {Visualizing 3D molecular structures is crucial to understanding and predicting their chemical behavior. However, static 2D hand-drawn skeletal structures remain the preferred method of chemical communication. Here, we combine cutting-edge technologies in augmented reality (AR), machine learning, and computational chemistry to develop MolAR, an open-source mobile application for visualizing molecules in AR directly from their hand-drawn chemical structures. Users can also visualize any molecule or protein directly from its name or protein data bank ID and compute chemical properties in real time via quantum chemistry cloud computing.
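As a pointer for readers of the Francini and Chirici dataset entry (pmid 35651671), here is a minimal sketch of the kind of Google Earth Engine filtering that typically precedes such Sentinel-2 change detection. The 3I3D step itself is not shown, and the bounding box, date range, and cloud threshold are illustrative assumptions, not the dataset's actual processing parameters.

import ee

ee.Initialize()  # assumes an authenticated Earth Engine account

italy_bbox = ee.Geometry.Rectangle([6.6, 36.6, 18.6, 47.1])  # rough Italy bounds
s2 = (ee.ImageCollection("COPERNICUS/S2")   # Sentinel-2 level-1C imagery
      .filterBounds(italy_bbox)
      .filterDate("2017-01-01", "2020-12-31")
      .filter(ee.Filter.lt("CLOUDY_PIXEL_PERCENTAGE", 20)))

print("Images available:", s2.size().getInfo())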
MolAR provides an easily accessible platform for the scientific community to visualize and interact with 3D molecular structures in an immersive and engaging way.}, } @article {pmid35646109, year = {2021}, author = {Sauber, AM and El-Kafrawy, PM and Shawish, AF and Amin, MA and Hagag, IM}, title = {A New Secure Model for Data Protection over Cloud Computing.}, journal = {Computational intelligence and neuroscience}, volume = {2021}, number = {}, pages = {8113253}, pmid = {35646109}, issn = {1687-5273}, mesh = {*Cloud Computing ; Computer Security ; *Confidentiality ; Information Storage and Retrieval ; }, abstract = {The main goal of any data storage model on the cloud is accessing data in an easy way without risking its security. Security is a major aspect of any cloud data storage model, as it must provide both safety and efficiency. In this paper, we propose a secure data protection model over the cloud. The proposed model presents a solution to some security issues of the cloud, such as data protection from any violations and protection from fake authorized-identity users, which adversely affect the security of the cloud. This paper covers multiple issues and challenges with cloud computing that impair the security and privacy of data. It presents the threats and attacks that affect data residing in the cloud. Our proposed model provides the benefits and effectiveness of security in cloud computing, such as enhanced encryption of data in the cloud. It provides security and scalability of data sharing for users on the cloud. Our model achieves security functions over cloud computing such as identification and authentication, authorization, and encryption. Also, this model protects the system from any fake data owner who enters malicious information that may undermine the main goal of cloud services. We develop the one-time password (OTP) as a login and uploading technique to protect users and data owners from any fake, unauthorized access to the cloud. We implement our model in a simulation called Next Generation Secure Cloud Server (NG-Cloud). The results show strengthened protection of end users and data owners against fake users and fake data owners in the cloud.}, } @article {pmid35645427, year = {2022}, author = {Algani, YMA and Boopalan, K and Elangovan, G and Santosh, DT and Chanthirasekaran, K and Patra, I and Pughazendi, N and Kiranbala, B and Nikitha, R and Saranya, M}, title = {Autonomous service for managing real time notification in detection of COVID-19 virus.}, journal = {Computers & electrical engineering : an international journal}, volume = {101}, number = {}, pages = {108117}, pmid = {35645427}, issn = {0045-7906}, abstract = {In today's world, the most prominent public issue in the field of medicine is the rapid spread of viral sickness. The seriousness of the disease lies in its fast-spreading nature. The main aim of the study is to propose a framework for earlier detection and forecasting of COVID-19 infection amongst people, so that precautionary measures can be taken to avoid the spread of the disease across the world. The proposed work has four stages: collection of the necessary data, classification of the collected information, mining and extraction, and finally decision modelling.
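The OTP login idea described by Sauber et al. (pmid 35646109) can be illustrated with a generic time-based one-time password routine. This is a standard TOTP-style sketch using only the Python standard library; it is not their NG-Cloud implementation, and the shared secret and 30-second interval are assumptions.

import hmac, hashlib, struct, time

def totp(secret: bytes, interval: int = 30, digits: int = 6) -> str:
    counter = int(time.time()) // interval                   # current time step
    msg = struct.pack(">Q", counter)                         # 8-byte big-endian counter
    digest = hmac.new(secret, msg, hashlib.sha1).digest()
    offset = digest[-1] & 0x0F                               # dynamic truncation offset
    code = struct.unpack(">I", digest[offset:offset + 4])[0] & 0x7FFFFFFF
    return str(code % 10 ** digits).zfill(digits)

# Server and user derive the same short-lived code from a shared secret.
print(totp(b"shared-cloud-secret"))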
Since the frequency of infection is often predictive, the probabilistic examination is measured as a degree of membership characterised by the associated fever measure. The predictions are then realised using a temporal RNN. The model finally provides effective outcomes in classification efficiency, reliability, prediction viability, etc.}, } @article {pmid35639724, year = {2022}, author = {Wang, C and Zhang, M}, title = {The road to change: Broadband China strategy and enterprise digitization.}, journal = {PloS one}, volume = {17}, number = {5}, pages = {e0269133}, pmid = {35639724}, issn = {1932-6203}, mesh = {*Artificial Intelligence ; China ; Cloud Computing ; Commerce ; *Organizations ; }, abstract = {The digitization of a company necessitates not only the effort of the company but also state backing of network infrastructure. In this study, we applied the difference-in-differences method to examine the impact of the Broadband China Strategy on corporate digitalization and its heterogeneity using data from Chinese listed firms from 2010 to 2020. The results show that the improvement in network infrastructure plays a vital role in promoting company digitization; this effect is highly varied due to differences in market demand and endowments. Non-state-owned firms, businesses in the eastern area, and technology-intensive businesses have profited the most. Among the five types of digitization, artificial intelligence and cloud computing are top priorities for enterprises. Our findings add to the literature on the spillover effects of broadband construction and the factors affecting enterprise digitalization.}, } @article {pmid35637932, year = {2022}, author = {Martín, A and Camacho, D}, title = {Recent advances on effective and efficient deep learning-based solutions.}, journal = {Neural computing & applications}, volume = {34}, number = {13}, pages = {10205-10210}, pmid = {35637932}, issn = {0941-0643}, abstract = {This editorial briefly analyses, describes, and provides a short summary of a set of selected papers published in a special issue focused on deep learning methods and architectures and their application to several domains and research areas. The set of selected and published articles covers two basic aspects of deep learning (DL) methods: efficiency of the models and effectiveness of the architectures. These papers revolve around different interesting application domains such as health (e.g. cancer, polyps, melanoma, mental health), wearable technologies, solar irradiance, social networks, cloud computing, wind turbines, object detection, music, and electricity, among others. This editorial provides a short description of each published article and a brief analysis of their main contributions.}, } @article {pmid35635621, year = {2022}, author = {Yan, EG and Arzt, NH}, title = {A Commentary on Process Improvements to Reduce Manual Tasks and Paper at Covid-19 Mass Vaccination Points of Dispensing in California.}, journal = {Journal of medical systems}, volume = {46}, number = {7}, pages = {47}, pmid = {35635621}, issn = {1573-689X}, mesh = {*COVID-19/prevention & control ; California ; Child ; Humans ; *Mass Vaccination ; Vaccination ; }, abstract = {My Turn is software used to manage several Covid-19 mass vaccination campaigns in California.
The objective of this article is to describe the use of My Turn at two points of dispensing in California and to comment on process improvements to reduce manual tasks and paper across six identified vaccination processes: registration, scheduling, administration, documentation, follow-up, and digital vaccine record. We reviewed publicly available documents of My Turn and patients vaccinated at George R. Moscone Convention Center in San Francisco and Oakland Coliseum Community Vaccination Clinic. For publicly available documents of My Turn, we examined videos of My Turn on YouTube, and documentation from EZIZ, the website for the California Vaccines for Children Program. For patients, we examined publicly available vaccination record cards on Instagram and Google. At the George R. Moscone Convention Center, 329,608 vaccine doses were given. At the Oakland Coliseum Community Vaccination Clinic, more than 500,000 vaccine doses were administered. My Turn can be used to reduce manual tasks and paper when mass vaccinating patients against Covid-19.}, } @article {pmid35634070, year = {2022}, author = {Rahmani, MKI and Shuaib, M and Alam, S and Siddiqui, ST and Ahmad, S and Bhatia, S and Mashat, A}, title = {Blockchain-Based Trust Management Framework for Cloud Computing-Based Internet of Medical Things (IoMT): A Systematic Review.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {9766844}, pmid = {35634070}, issn = {1687-5273}, mesh = {*Blockchain ; Cloud Computing ; Internet ; Reproducibility of Results ; Trust ; }, abstract = {The internet of medical things (IoMT) is a smart medical device structure that includes apps, health services, and systems. These medical devices and applications are linked to healthcare systems via the internet. Because IoT devices lack computational power, the collected data can be processed and analyzed in the cloud by more computationally intensive tools. Cloud computing in IoMT is also used to store IoT data as part of a collaborative effort. Cloud computing has provided new avenues for providing services to users with better user experience, scalability, and proper resource utilization compared to traditional platforms. However, these cloud platforms are susceptible to several security breaches, as is evident from recent and past incidents. Trust management is a crucial feature required for providing secure and reliable service to users. The traditional trust management protocols in the cloud computing setting are centralized and result in single-point failure. Blockchain has emerged as a possible solution for domains that require trust and reliability in several aspects. Different researchers have presented various blockchain-based trust management approaches. This study reviews the trust challenges in cloud computing and analyzes how blockchain technology addresses these challenges using blockchain-based trust management frameworks. There are ten (10) solutions under two broad categories of decentralization and security. The challenges addressed are centralization, huge overhead, trust evidence, low adaptiveness, and inaccuracy. This systematic review has been performed in six stages: identifying the research question, research methods, screening the related articles, abstract and keyword examination, data retrieval, and mapping processing. Atlas.ti software is used to analyze the relevant articles based on keywords.
A total of 70 codes and 262 quotations were compiled, and these quotations were categorized using manual coding. Finally, ten (10) solutions under the two main categories of decentralization and security were retrieved. Of these ten (10) solutions, three (03) fell into the security category, and the remaining seven (07) came under the decentralization category.}, } @article {pmid35634057, year = {2022}, author = {Ni, Q}, title = {Deep Neural Network Model Construction for Digital Human Resource Management with Human-Job Matching.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {1418020}, pmid = {35634057}, issn = {1687-5273}, mesh = {*Algorithms ; Big Data ; Humans ; *Neural Networks, Computer ; Workforce ; }, abstract = {This article uses deep neural network technology combined with digital HRM knowledge to research human-job matching systematically. Through intelligent digital means such as 5G communication, cloud computing, big data, neural networks, and user portraits, this article proposes the design of a corresponding digital transformation strategy for HRM. It further puts forward guarantee measures for enhancing HRM thinking and establishing an HRM culture to ensure the smooth implementation of the digital transformation strategy. The system uses charts for data visualization and the Flask framework for backend construction, and the data is stored through CSV files, MySQL, and configuration files. The system is based on a deep learning algorithm for job applicant matching and intelligent recommendation of jobs for job seekers, providing practical help for job applicants applying for jobs. The intelligent job recommendation algorithm partly adopts a bidirectional long short-term memory neural network (Bi-LSTM) and the word-level person-job matching neural network APJFNN built with an attention mechanism. By embedding the text representation of job demand information into a shared representation space, a joint embedded convolutional neural network (JE-CNN) for job-matching analysis is designed and implemented, and quantitative analysis measures the degree of matching with the job.}, } @article {pmid35632364, year = {2022}, author = {Umoren, O and Singh, R and Pervez, Z and Dahal, K}, title = {Securing Fog Computing with a Decentralised User Authentication Approach Based on Blockchain.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {10}, pages = {}, pmid = {35632364}, issn = {1424-8220}, mesh = {Biometry ; *Blockchain ; Computer Security ; Privacy ; Reproducibility of Results ; }, abstract = {Low-cost sensors are often preferred to high-cost devices in the IoT because they are less expensive; however, these low-cost sensors have their own limitations, such as the accuracy, quality, and reliability of the data collected. Fog computing offers solutions to those limitations; nevertheless, owing to its intrinsic distributed architecture, it faces challenges in the form of security of fog devices, secure authentication, and privacy. Blockchain technology has been utilised to offer solutions for the authentication and security challenges in fog systems. This paper proposes an authentication system that utilises the characteristics and advantages of blockchain and smart contracts to authenticate users securely. The implemented system uses the email address, username, Ethereum address, password and data from a biometric reader to register and authenticate users.
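For the Bi-LSTM component named in Ni's human-job matching entry (pmid 35634057), a minimal PyTorch sketch of a bidirectional LSTM text encoder follows; the vocabulary size, dimensions, and mean-pooling choice are assumptions rather than the paper's architecture. A resume vector and a job-posting vector encoded this way could then be compared with, for example, cosine similarity to score a match.

import torch
import torch.nn as nn

class ResumeEncoder(nn.Module):
    def __init__(self, vocab_size=10000, emb_dim=128, hidden=64):
        super().__init__()
        self.emb = nn.Embedding(vocab_size, emb_dim)
        self.lstm = nn.LSTM(emb_dim, hidden, batch_first=True, bidirectional=True)

    def forward(self, token_ids):                  # (batch, seq_len)
        out, _ = self.lstm(self.emb(token_ids))    # (batch, seq_len, 2 * hidden)
        return out.mean(dim=1)                     # mean-pool to one vector per text

enc = ResumeEncoder()
vec = enc(torch.randint(0, 10000, (2, 30)))        # two toy token sequences
print(vec.shape)                                   # torch.Size([2, 128])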
Experiments showed that the proposed method is secure and achieved performance improvements compared to existing methods. The comparison with the state of the art showed that the proposed authentication system consumed up to 30% fewer resources in transaction and execution cost; however, there was an increase of up to 30% in miner fees.}, } @article {pmid35632264, year = {2022}, author = {Wu, TY and Meng, Q and Kumari, S and Zhang, P}, title = {Rotating behind Security: A Lightweight Authentication Protocol Based on IoT-Enabled Cloud Computing Environments.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {10}, pages = {}, pmid = {35632264}, issn = {1424-8220}, abstract = {With the rapid development of technology based on the Internet of Things (IoT), numerous IoT devices are being used on a daily basis. The rise in cloud computing plays a crucial role in solving the resource constraints of IoT devices and in promoting resource sharing, whereby users can access IoT services provided in various environments. However, this complex and open wireless network environment poses security and privacy challenges. Therefore, designing a secure authentication protocol is crucial to protecting user privacy in IoT services. In this paper, a lightweight authentication protocol was designed for IoT-enabled cloud computing environments. A real-or-random model and the automatic verification tool ProVerif were used to conduct a formal security analysis. Its security was further proved through an informal analysis. Finally, through security and performance comparisons, our protocol was confirmed to be relatively secure and to display good performance.}, } @article {pmid35632161, year = {2022}, author = {Alnaim, AK and Alwakeel, AM and Fernandez, EB}, title = {Towards a Security Reference Architecture for NFV.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {10}, pages = {}, pmid = {35632161}, issn = {1424-8220}, support = {1443-001//Sensor Network and Cellular Systems Research Center (SNCS)/ ; }, mesh = {*Computers ; Reproducibility of Results ; *Software ; }, abstract = {Network function virtualization (NFV) is an emerging technology that is becoming increasingly important due to its many advantages. NFV transforms legacy hardware-based network infrastructure into software-based virtualized networks. This transformation increases the flexibility and scalability of networks, at the same time reducing the time for the creation of new networks. However, the attack surface of the network increases, which requires the definition of a clear map of where attacks may happen. ETSI standards precisely define many security aspects of this architecture, but these publications are very long and provide many details which are not of interest to software architects. We start by conducting threat analysis of some of the NFV use cases. The use cases serve as scenarios where the threats to the architecture can be enumerated. Representing threats as misuse cases that describe the modus operandi of attackers, we can find countermeasures to them in the form of security patterns, and we can build a security reference architecture (SRA). Until now, only imprecise models of NFV architectures existed; by making them more detailed and precise it is possible to handle not only security but also safety and reliability, although we do not explore those aspects.
Because security is a global property that requires a holistic approach, we strongly believe that architectural models are fundamental to producing secure networks and allow us to build networks that are secure by design. The resulting SRA defines a roadmap to implement secure concrete architectures.}, } @article {pmid35632158, year = {2022}, author = {Makarichev, V and Lukin, V and Illiashenko, O and Kharchenko, V}, title = {Digital Image Representation by Atomic Functions: The Compression and Protection of Data for Edge Computing in IoT Systems.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {10}, pages = {}, pmid = {35632158}, issn = {1424-8220}, support = {830943//European Commission/ ; }, abstract = {Digital images are used in various technological, financial, economic, and social processes. Huge datasets of high-resolution images require protected storage and low resource-intensive processing, especially when applying edge computing (EC) for designing Internet of Things (IoT) systems for industrial domains such as autonomous transport systems. For this reason, the problem of developing an image representation that provides compression and protection features combined with the ability to perform low-complexity analysis is relevant for EC-based systems. Security and privacy issues are important for image processing considering IoT and cloud architectures as well. To solve this problem, we propose to apply the discrete atomic transform (DAT), which is based on a special class of atomic functions generalizing the well-known up-function of V.A. Rvachev. A lossless image compression algorithm based on DAT is developed, and its performance is studied for different structures of DAT. This algorithm, which combines low computational complexity, efficient lossless compression, and reliable protection features with convenient image representation, is the main contribution of the paper. It is shown that a sufficient reduction of memory expenses can be obtained. Additionally, the dependence of compression efficiency, measured by compression ratio (CR), on the DAT structure applied is investigated. It is established that varying the DAT structure produces only a minor variation in CR. The possibility of applying this feature to data protection and security assurance is grounded and discussed. In addition, a file structure for storing the compressed and protected data is proposed, and its properties are considered. A multi-level structure for the application of atomic functions in image processing and protection for EC in IoT systems is suggested and analyzed.
However, due to the high mobility of vehicles and server overload, VEC faces many challenges in determining where to process the offloaded task in real time, which degrades vehicular performance. Therefore, to deal with these challenges, an efficient dynamic task offloading approach based on a non-cooperative game (NGTO) is proposed in this study. In the NGTO approach, each vehicle can choose its own strategy for whether a task is offloaded to a multi-access edge computing (MEC) server or a cloud server to maximize its benefits. Our proposed strategy can dynamically adjust the task-offloading probability to acquire the maximum utility for each vehicle. We use a best-response offloading strategy algorithm for the task-offloading game in order to achieve a unique and stable equilibrium. Numerous simulation experiments affirm that our proposed scheme fulfills the performance guarantees and can reduce the response time and task-failure rate by almost 47.6% and 54.6%, respectively, when compared with the local RSU computing (LRC) scheme. Moreover, the reductions are approximately 32.6% and 39.7%, respectively, when compared with a random offloading scheme, and approximately 26.5% and 28.4%, respectively, when compared with a collaborative offloading scheme.}, } @article {pmid35632024, year = {2022}, author = {Sepulveda, F and Thangraj, JS and Pulliam, J}, title = {The Edge of Exploration: An Edge Storage and Computing Framework for Ambient Noise Seismic Interferometry Using Internet of Things Based Sensor Networks.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {10}, pages = {}, pmid = {35632024}, issn = {1424-8220}, support = {FOA DE-FOA-0001445//United States Department of Energy/ ; }, abstract = {Recent technological advances have reduced the complexity and cost of developing sensor networks for remote environmental monitoring. However, the challenges of acquiring, transmitting, storing, and processing remote environmental data remain significant. The transmission of large volumes of sensor data to a centralized location (i.e., the cloud) burdens network resources, introduces latency and jitter, and can ultimately impact user experience. Edge computing has emerged as a paradigm in which substantial storage and computing resources are located at the "edge" of the network. In this paper, we present an edge storage and computing framework leveraging commercially available components organized in a tiered architecture and arranged in a hub-and-spoke topology. The framework includes a popular distributed database to support the acquisition, transmission, storage, and processing of Internet-of-Things-based sensor network data in a field setting. We present details regarding the architecture, distributed database, embedded systems, and topology used to implement an edge-based solution.
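The best-response dynamic described in the Hossain et al. entry (pmid 35632088) can be illustrated with a toy offloading game. The cost model, constants, and smoothing step below are invented for illustration and are not the paper's utility functions.

N = 20                        # number of vehicles
p = [0.5] * N                 # each vehicle's probability of offloading to MEC

def mec_cost(load):           # MEC delay grows with the expected offered load
    return 1.0 + 2.0 * load / N

LOCAL_COST = 2.0              # assumed fixed cost of computing at the local RSU

for _ in range(100):          # iterate toward an (approximately) stable point
    load = sum(p)
    for i in range(N):
        others = load - p[i]                  # expected load excluding vehicle i
        best = 1.0 if mec_cost(others + 1) < LOCAL_COST else 0.0
        p[i] += 0.1 * (best - p[i])           # smoothed best-response update

print("offloading probabilities:", [round(x, 2) for x in p[:5]])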
Lastly, a real-world case study (i.e., seismic) is presented that leverages the edge storage and computing framework to acquire, transmit, store, and process millions of samples of data per hour.}, } @article {pmid35629136, year = {2022}, author = {Silva, P and Dahlke, DV and Smith, ML and Charles, W and Gomez, J and Ory, MG and Ramos, KS}, title = {An Idealized Clinicogenomic Registry to Engage Underrepresented Populations Using Innovative Technology.}, journal = {Journal of personalized medicine}, volume = {12}, number = {5}, pages = {}, pmid = {35629136}, issn = {2075-4426}, abstract = {Current best practices in tumor registries provide a glimpse into a limited time frame over the natural history of disease, usually a narrow window around diagnosis and biopsy. This creates challenges meeting public health and healthcare reimbursement policies that increasingly require robust documentation of long-term clinical trajectories, quality of life, and health economics outcomes. These challenges are amplified for underrepresented minority (URM) and other disadvantaged populations, who tend to view the institution of clinical research with skepticism. Participation gaps leave such populations underrepresented in clinical research and, importantly, in policy decisions about treatment choices and reimbursement, thus further augmenting health, social, and economic disparities. Cloud computing, mobile computing, digital ledgers, tokenization, and artificial intelligence technologies are powerful tools that promise to enhance longitudinal patient engagement across the natural history of disease. These tools also promise to enhance engagement by giving participants agency over their data and addressing a major impediment to research participation. This will only occur if these tools are available for use with all patients. Distributed ledger technologies (specifically blockchain) converge these tools and offer a significant element of trust that can be used to engage URM populations more substantively in clinical research. This is a crucial step toward linking composite cohorts for training and optimization of the artificial intelligence tools for enhancing public health in the future. The parameters of an idealized clinical genomic registry are presented.}, } @article {pmid35627629, year = {2022}, author = {Li, J and Gong, J and Guldmann, JM and Yang, J and Zhang, Z}, title = {Simulation of Land-Use Spatiotemporal Changes under Ecological Quality Constraints: The Case of the Wuhan Urban Agglomeration Area, China, over 2020-2030.}, journal = {International journal of environmental research and public health}, volume = {19}, number = {10}, pages = {}, pmid = {35627629}, issn = {1660-4601}, mesh = {China ; Cities ; *Conservation of Natural Resources/methods ; *Ecosystem ; Forests ; Humans ; }, abstract = {Human activities coupled with land-use change pose a threat to the regional ecological environment. Therefore, it is essential to determine the future land-use structure and spatial layout for ecological protection and sustainable development. Land use simulations based on traditional scenarios do not fully consider ecological protection, leading to urban sprawl. Timely and dynamic monitoring of ecological status and change is vital to managing and protecting urban ecology and sustainable development. Remote sensing indices, including greenness, humidity, dryness, and heat, are calculated annually. 
This method compensates for data loss and difficulty in stitching remote sensing ecological indices over large-scale areas and long time-series. Herein, a framework is developed by integrating the four above-mentioned indices for a rapid, large-scale prediction of land use/cover that incorporates the protection of high ecological quality zone (HEQZ) land. The Google Earth Engine (GEE) platform is used to build a comprehensive HEQZ map of the Wuhan Urban Agglomeration Area (WUAA). Two scenarios are considered: Ecological protection (EP) based on HEQZ and natural growth (NG) without spatial ecological constraints. Land use/cover in the WUAA is predicted over 2020-2030, using the patch-generating land use simulation (PLUS) model. The results show that: (1) the HEQZ area covers 21,456 km[2], accounting for 24% of the WUAA, and is mainly distributed in the Xianning, Huangshi, and Xiantao regions. Construction land has the highest growth rate (5.2%) under the NG scenario. The cropland area decreases by 3.2%, followed by woodlands (0.62%). (2) By delineating the HEQZ, woodlands, rivers, lakes, and wetlands are well protected; construction land displays a downward trend based on the EP scenario with the HEQZ, and the simulated construction land in 2030 is located outside the HEQZ. (3) Image processing based on GEE cloud computing can ameliorate the difficulties of remote sensing data (i.e., missing data, cloudiness, chromatic aberration, and time inconsistency). The results of this study can provide essential scientific guidance for territorial spatial planning under the premise of ecological security.}, } @article {pmid35623334, year = {2022}, author = {Gutz, SE and Stipancic, KL and Yunusova, Y and Berry, JD and Green, JR}, title = {Validity of Off-the-Shelf Automatic Speech Recognition for Assessing Speech Intelligibility and Speech Severity in Speakers With Amyotrophic Lateral Sclerosis.}, journal = {Journal of speech, language, and hearing research : JSLHR}, volume = {65}, number = {6}, pages = {2128-2143}, pmid = {35623334}, issn = {1558-9102}, support = {F31 DC019016/DC/NIDCD NIH HHS/United States ; K24 DC016312/DC/NIDCD NIH HHS/United States ; R01 DC017291/DC/NIDCD NIH HHS/United States ; T32 DC000038/DC/NIDCD NIH HHS/United States ; }, mesh = {*Amyotrophic Lateral Sclerosis/complications ; Dysarthria/diagnosis/etiology ; Humans ; Reproducibility of Results ; Speech Disorders ; Speech Intelligibility ; *Speech Perception ; Speech Production Measurement/methods ; }, abstract = {PURPOSE: There is increasing interest in using automatic speech recognition (ASR) systems to evaluate impairment severity or speech intelligibility in speakers with dysarthria. We assessed the clinical validity of one currently available off-the-shelf (OTS) ASR system (i.e., a Google Cloud ASR API) for indexing sentence-level speech intelligibility and impairment severity in individuals with amyotrophic lateral sclerosis (ALS), and we provided guidance for potential users of such systems in research and clinic.

METHOD: Using speech samples collected from 52 individuals with ALS and 20 healthy control speakers, we compared word recognition rate (WRR) from the commercially available Google Cloud ASR API (Machine WRR) to clinician-provided judgments of impairment severity, as well as sentence intelligibility (Human WRR). We assessed the internal reliability of Machine and Human WRR by comparing the standard deviation of WRR across sentences to the minimally detectable change (MDC), a clinical benchmark that indicates whether results are within measurement error. We also evaluated Machine and Human WRR diagnostic accuracy for classifying speakers into clinically established categories.
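For readers unfamiliar with the metric, a sentence-level word recognition rate of this kind is commonly computed as one minus the word error rate derived from an edit-distance alignment of reference and hypothesis transcripts. The sketch below is a generic illustration, not the scoring code used by Gutz et al.

def wrr(reference: str, hypothesis: str) -> float:
    ref, hyp = reference.lower().split(), hypothesis.lower().split()
    # dp[i][j] = edit distance between ref[:i] and hyp[:j]
    dp = [[0] * (len(hyp) + 1) for _ in range(len(ref) + 1)]
    for i in range(len(ref) + 1):
        dp[i][0] = i
    for j in range(len(hyp) + 1):
        dp[0][j] = j
    for i in range(1, len(ref) + 1):
        for j in range(1, len(hyp) + 1):
            cost = 0 if ref[i - 1] == hyp[j - 1] else 1
            dp[i][j] = min(dp[i - 1][j] + 1,         # deletion
                           dp[i][j - 1] + 1,         # insertion
                           dp[i - 1][j - 1] + cost)  # substitution
    return max(0.0, 1.0 - dp[len(ref)][len(hyp)] / max(1, len(ref)))

print(wrr("the quick brown fox", "the quick brown box"))  # prints 0.75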

RESULTS: Human WRR achieved better accuracy than Machine WRR when indexing speech severity, and, although related, Human and Machine WRR were not strongly correlated. When the speech signal was mixed with noise (noise-augmented ASR) to reduce a ceiling effect, Machine WRR performance improved. Internal reliability metrics were worse for Machine than Human WRR, particularly for typical and mildly impaired severity groups, although sentence length significantly impacted both Machine and Human WRRs.

CONCLUSIONS: Results indicated that the OTS ASR system was inadequate for early detection of speech impairment and grading overall speech severity. While Machine and Human WRR were correlated, ASR should not be used as a one-to-one proxy for transcription speech intelligibility or clinician severity ratings. Overall, findings suggested that the tested OTS ASR system, Google Cloud ASR, has limited utility for grading clinical speech impairment in speakers with ALS.}, } @article {pmid35622338, year = {2022}, author = {Christley, S and Stervbo, U and Cowell, LG and , }, title = {Immune Repertoire Analysis on High-Performance Computing Using VDJServer V1: A Method by the AIRR Community.}, journal = {Methods in molecular biology (Clifton, N.J.)}, volume = {2453}, number = {}, pages = {439-446}, pmid = {35622338}, issn = {1940-6029}, mesh = {*Computing Methodologies ; High-Throughput Nucleotide Sequencing ; *Software ; Workflow ; }, abstract = {AIRR-seq data sets are usually large and require specialized analysis methods and software tools. A typical Illumina MiSeq sequencing run generates 20-30 million 2 × 300 bp paired-end sequence reads, which roughly corresponds to 15 GB of sequence data to be processed. Other platforms like NextSeq, which is useful in projects where the full V gene is not needed, create about 400 million 2 × 150 bp paired-end reads. Because of the size of the data sets, the analysis can be computationally expensive, particularly the early analysis steps like preprocessing and gene annotation that process the majority of the sequence data. A standard desktop PC may take 3-5 days of constant processing for a single MiSeq run, so dedicated high-performance computational resources may be required. VDJServer provides free access to high-performance computing (HPC) at the Texas Advanced Computing Center (TACC) through a graphical user interface (Christley et al. Front Immunol 9:976, 2018). VDJServer is a cloud-based analysis portal for immune repertoire sequence data that provides access to a suite of tools for a complete analysis workflow, including modules for preprocessing and quality control of sequence reads, V(D)J gene assignment, repertoire characterization, and repertoire comparison. Furthermore, VDJServer has parallelized execution for tools such as IgBLAST, so more compute resources are utilized as the size of the input data grows. Analysis that takes days on a desktop PC might take only a few hours on VDJServer. VDJServer is a free, publicly available, and open-source licensed resource. Here, we describe the workflow for performing immune repertoire analysis on VDJServer's high-performance computing.}, } @article {pmid35611115, year = {2022}, author = {Rudrapati, R}, title = {Using industrial 4.0 technologies to combat the COVID-19 pandemic.}, journal = {Annals of medicine and surgery (2012)}, volume = {78}, number = {}, pages = {103811}, pmid = {35611115}, issn = {2049-0801}, abstract = {The COVID-19 (Coronavirus) pandemic has led to a surge in the demand for healthcare devices, precautions, and medicines, along with advanced information technology. It has become a global mission to control the Coronavirus to prevent the death of innocent people. The fourth industrial revolution (I4.0) is a new approach to thinking that is proposed across a wide range of industries and services to achieve greater success and quality of life. Several initiatives associated with industry 4.0 are expected to make a difference in the fight against COVID-19.
Implementing I4.0 components effectively could lead to a reduction in barriers between patients and healthcare workers and could result in improved communication between them. The present study aims to review the components of I4.0 and related tools used to combat the Coronavirus. This article highlights the benefits of each component of I4.0 that is useful in controlling the spread of COVID-19. From the present study, it is concluded that I4.0 technologies could provide an effective solution to deal with local as well as global medical crises in an innovative way.}, } @article {pmid35607467, year = {2022}, author = {Yang, Q}, title = {Analysis of English Cultural Teaching Model Based on Machine Learning.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {7126758}, pmid = {35607467}, issn = {1687-5273}, mesh = {*Artificial Intelligence ; Humans ; Machine Learning ; Pandemics ; *Students ; Surveys and Questionnaires ; }, abstract = {According to world population figures, nearly five billion people use mobile phones in their daily lives, a figure that has increased by 20% in the last twelve months compared to the previous report. A survey conducted by researchers to find the amount of data consumed in a month by every mobile phone in the world ultimately found 45 exabytes of data collected from a single user within a month. In today's world, data consumption and data analytics are considered among the most important necessities for e-commerce companies. With the help of such data collected from a person, it is possible to predict the person's future signature or activity. If 45 terabytes of data can be stored for a single user, determining the average calculation and amount of data to be collected for five billion users appears much more difficult. Even beyond human capacity, it would be difficult for a traditional computer system to handle this amount of data. To study and understand a concept from machine learning and artificial intelligence requires a sizeable collection of data to predict according to a person's activity. This article explains the roles of faculty and students, as well as the requirements for academic evaluation. Even before the pandemic, most people did not have any idea about the online teaching model. It was only when direct (offline) classes became impossible that people were forced into the online world of teaching. Nearly 60% of countries are trying to convert their education systems to such online models, which improve communication between students and teachers and also enable different schemes for students. Big data can be considered one of the technological revolutions in information technology companies that became popular after the rise of cloud computing. A support vector machine (SVM) is proposed for analyzing English culture teaching and is compared with traditional fuzzy logic.
The results show the proposed model achieves an accuracy of 98%, which is 5% higher than that of the existing algorithm.}, } @article {pmid35607465, year = {2022}, author = {Li, X}, title = {5G Converged Network Resource Allocation Strategy Based on Reinforcement Learning in Edge Cloud Computing Environment.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {6174708}, pmid = {35607465}, issn = {1687-5273}, mesh = {*Algorithms ; *Cloud Computing ; Computers ; Resource Allocation ; }, abstract = {Aiming at the problem that the computing power and resources of Mobile Edge Computing (MEC) servers struggle to process long-period, intensive task data, this study proposes a 5G converged network resource allocation strategy based on reinforcement learning in an edge cloud computing environment. In order to solve the problem of insufficient local computing power, the proposed strategy offloads some tasks to the edge of the network. Firstly, we build a multi-MEC-server, multi-user mobile edge system and design optimization objectives to minimize the average response time of system tasks and the total energy consumption. Then, the task offloading and resource allocation process is modeled as a Markov decision process. Furthermore, a deep Q-network is used to find the optimal resource allocation scheme. Finally, the proposed strategy is analyzed experimentally based on the TensorFlow learning framework. Experimental results show that when the number of users is 110, the final energy consumption is about 2500 J, demonstrating that the strategy effectively reduces task delay and improves the utilization of resources.}, } @article {pmid35607458, year = {2022}, author = {Li, J}, title = {Study on Integration and Application of Artificial Intelligence and Wireless Network in Piano Music Teaching.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {8745833}, pmid = {35607458}, issn = {1687-5273}, mesh = {Algorithms ; *Artificial Intelligence ; Humans ; *Music ; Students ; Technology ; Universities ; }, abstract = {Until 2019, most people had never faced the situation that would become their life-changing moment. Most universities are now conducting classes for their students with the help of virtual classrooms, indicating massive technological growth. However, this development took little time to reach students and teachers: within five to six months of successful projects, most application producers had launched their official sites to conduct online classes and tests for students. The introduction of virtual classes is not the only example of technological advancement; cloud computing, artificial intelligence, and deep learning have collaborated to produce appropriate, refined, and less error-prone results in all such fields of teaching. These technological advancements have given rise to design models built on wireless networks, particularly for music-related courses. The Quality-Learning (Q-Learning) Algorithm (QLA) is the pillar of this research for improving the implementation of artificial intelligence in music teaching. The proposed algorithm aids in improving the accuracy of the music, its frequency, and its wavelength as it is played.
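The Markov-decision-process formulation in Li's 5G resource allocation entry (pmid 35607465) can be illustrated in miniature. The sketch below substitutes a tabular Q-learning update for the paper's deep Q-network, and the states, actions, rewards, and dynamics are all invented for illustration.

import random

N_LOAD_LEVELS, ACTIONS = 5, (0, 1)    # states: discretized server load; 0 = local, 1 = offload
Q = [[0.0, 0.0] for _ in range(N_LOAD_LEVELS)]
alpha, gamma, eps = 0.1, 0.9, 0.1     # learning rate, discount, exploration

def step(load, action):
    # Toy model: offloading is cheap unless the edge server is heavily loaded.
    delay = 1.0 + (2.0 * load if action == 1 else 3.0)
    next_load = min(N_LOAD_LEVELS - 1, max(0, load + random.choice((-1, 0, 1))))
    return -delay, next_load          # reward = negative delay

load = 0
for _ in range(5000):
    a = random.choice(ACTIONS) if random.random() < eps else Q[load].index(max(Q[load]))
    r, nxt = step(load, a)
    Q[load][a] += alpha * (r + gamma * max(Q[nxt]) - Q[load][a])  # Q-learning update
    load = nxt

print([["local", "offload"][row.index(max(row))] for row in Q])  # learned policy per load level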
The proposed QLA is compared with the existing K-Nearest Neighbour (KNN) algorithm, and the results show that QLA has achieved 99.23% accuracy in intelligent piano music teaching through wireless network mode.}, } @article {pmid35605202, year = {2022}, author = {Lewsey, MG and Yi, C and Berkowitz, O and Ayora, F and Bernado, M and Whelan, J}, title = {scCloudMine: A cloud-based app for visualization, comparison, and exploration of single-cell transcriptomic data.}, journal = {Plant communications}, volume = {3}, number = {4}, pages = {100302}, pmid = {35605202}, issn = {2590-3462}, mesh = {Cloud Computing ; Hormones ; *Mobile Applications ; Sequence Analysis, RNA ; Single-Cell Analysis ; *Transcriptome ; }, abstract = {scCloudMine is a cloud-based application for visualization, comparison, and exploration of single-cell transcriptome data. It does not require an on-site, high-power computing server, installation, or associated expertise and expense. Users upload their own or publicly available scRNA-seq datasets after pre-processing for visualization using a web browser. The data can be viewed in two color modes (Cluster, representing cell identity, and Values, showing levels of expression), and data can be queried using keywords or gene identification number(s). Using the app to compare studies, we determined that some genes frequently used as cell-type markers are in fact study specific. The apparent cell-specific expression of PHO1;H3 differed between GFP-tagging and scRNA-seq studies. Some phosphate transporter genes were induced by protoplasting, but they retained cell specificity, suggesting that cell-specific responses to stress (i.e., protoplasting) can occur. Examination of the cell specificity of hormone response genes revealed that 132 hormone-responsive genes display restricted expression and that the jasmonate response gene TIFY8 is expressed in endodermal cells, in contrast to previous reports. It also appears that JAZ repressors have cell-type-specific functions. These features identified using scCloudMine highlight the need for resources to enable biological researchers to compare their datasets of interest under a variety of parameters. scCloudMine enables researchers to form new hypotheses and perform comparative studies and allows for the easy re-use of data from this emerging technology by a wide variety of users who may not have access or funding for high-performance on-site computing and support.}, } @article {pmid35602625, year = {2022}, author = {Ye, Q and Wang, M and Meng, H and Xia, F and Yan, X}, title = {Efficient Linkable Ring Signature Scheme over NTRU Lattice with Unconditional Anonymity.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {8431874}, pmid = {35602625}, issn = {1687-5273}, mesh = {*Algorithms ; *Computer Security ; }, abstract = {In cloud and edge computing, senders of data often want to be anonymous, while recipients of data always expect that the data come from a reliable sender and they are not redundant. Linkable ring signature (LRS) can not only protect the anonymity of the signer, but also detect whether two different signatures are signed by the same signer. Today, most lattice-based LRS schemes only satisfy computational anonymity. To the best of our knowledge, only the lattice-based LRS scheme proposed by Torres et al. can achieve unconditional anonymity. But the efficiency of signature generation and verification of the scheme is very low, and the signature length is also relatively long.
With the preimage sampling, trapdoor generation, and rejection sampling algorithms, this study proposed an efficient LRS scheme with unconditional anonymity based on the e-NTRU problem under the random oracle model. We implemented our scheme and Torres et al.'s scheme, as well as four other efficient lattice-based LRS schemes. It is shown that under the same security level, compared with Torres et al.'s scheme, the signature generation time, signature verification time, and signature size of our scheme are reduced by about 94.52%, 97.18%, and 58.03%, respectively.}, } @article {pmid35602318, year = {2023}, author = {Mansour, RF and Alhumyani, H and Khalek, SA and Saeed, RA and Gupta, D}, title = {Design of cultural emperor penguin optimizer for energy-efficient resource scheduling in green cloud computing environment.}, journal = {Cluster computing}, volume = {26}, number = {1}, pages = {575-586}, pmid = {35602318}, issn = {1386-7857}, abstract = {In recent times, energy-related issues have become challenging with the increasing size of data centers. Green cloud computing (GCC) has emerged as a recent computing platform aimed at handling energy utilization in cloud data centers. Load balancing is generally employed to optimize resource usage, throughput, and delay. Aiming at the reduction of energy utilization at the data centers of GCC, this paper designs an energy-efficient resource scheduling technique using the cultural emperor penguin optimizer (CEPO) algorithm, called EERS-CEPO, in the GCC environment. The proposed model aims to distribute the workload amongst several data centers or other resources, thereby avoiding the overload of individual resources. The CEPO algorithm is designed based on the fusion of the cultural algorithm (CA) and the emperor penguin optimizer (EPO); it boosts the exploitation capabilities of the EPO algorithm using the CA, which constitutes the novelty of the work. The EERS-CEPO algorithm has derived a fitness function to optimally schedule the resources in data centers, minimize the operational and maintenance cost of the GCC, and thereby decrease the energy utilization and heat generation. To validate the improved performance of the EERS-CEPO algorithm, a wide range of experiments was performed, and the experimental outcomes highlighted its better performance over recent state-of-the-art techniques.}, } @article {pmid35592460, year = {2022}, author = {Doyen, S and Dadario, NB}, title = {12 Plagues of AI in Healthcare: A Practical Guide to Current Issues With Using Machine Learning in a Medical Context.}, journal = {Frontiers in digital health}, volume = {4}, number = {}, pages = {765406}, pmid = {35592460}, issn = {2673-253X}, abstract = {The healthcare field has long been promised a number of exciting and powerful applications of Artificial Intelligence (AI) to improve the quality and delivery of health care services. AI techniques, such as machine learning (ML), have proven the ability to model enormous amounts of complex data and biological phenomena in ways only imaginable with human abilities alone. As such, medical professionals, data scientists, and Big Tech companies alike have all invested substantial time, effort, and funding into these technologies with hopes that AI systems will provide rigorous and systematic interpretations of large amounts of data that can be leveraged to augment clinical judgments in real time.
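Returning briefly to the EERS-CEPO entry (pmid35602318) above: its core idea is a fitness function that scores a candidate schedule by energy use and load balance. A hypothetical sketch of such a scoring function follows; the cost model, weights, and toy inputs are invented for illustration, not taken from the paper.

```python
# Hypothetical fitness function in the spirit of the EERS-CEPO entry above:
# score a task-to-data-center assignment by total energy plus load imbalance.
# The cost model and weights are illustrative assumptions, not the paper's.
def fitness(assignment, task_load, dc_capacity, energy_per_unit,
            w_energy=0.7, w_balance=0.3):
    n_dc = len(dc_capacity)
    loads = [0.0] * n_dc
    for task, dc in enumerate(assignment):      # assignment[i] = data center of task i
        loads[dc] += task_load[task]
    energy = sum(l * e for l, e in zip(loads, energy_per_unit))
    utilisation = [l / c for l, c in zip(loads, dc_capacity)]
    mean_u = sum(utilisation) / n_dc
    imbalance = sum((u - mean_u) ** 2 for u in utilisation) / n_dc
    return w_energy * energy + w_balance * imbalance   # lower is better

# Toy usage: three tasks assigned across two data centers.
print(fitness([0, 1, 1], [2.0, 1.0, 3.0], [10.0, 10.0], [1.0, 1.2]))
```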
However, despite not being newly introduced, AI-based medical devices have more often than not fallen short of the true clinical impact originally promised, or of which they are likely capable, as seen during the current COVID-19 pandemic. There are several common pitfalls for these technologies that, if not prospectively managed or adjusted in real time, will continue to hinder their performance in high-stakes environments outside of the lab in which they were created. To address these concerns, we outline and discuss many of the problems that future developers will likely face that contribute to these failures. Specifically, we examine the field under four lenses: approach, data, method, and operation. If we continue to prospectively address and manage these concerns with reliable solutions and appropriate system processes in place, then we as a field may further optimize the clinical applicability and adoption of medical AI technology moving forward.}, } @article {pmid35591261, year = {2022}, author = {Jiang, Y and Wu, S and Mo, Q and Liu, W and Wei, X}, title = {A Cloud-Computing-Based Portable Networked Ground Station System for Microsatellites.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {9}, pages = {}, pmid = {35591261}, issn = {1424-8220}, mesh = {*Cloud Computing ; *Microsatellite Repeats/genetics ; }, abstract = {Microsatellites have attracted a large number of scholars and engineers because of their portability and distribution characteristics. The ground station suitable for microsatellite service has become an important research topic. In this paper, we propose a networked ground station and verify it on our own microsatellite. The specific networked ground station system consists of multiple ground nodes. They can work together to complete data transmission tasks with higher efficiency. After a description of our microsatellite project, a reasonable distribution of ground nodes is given. A cloud computing model is used to realize the coordination of multiple ground nodes. An adaptive communication system between satellites and ground stations is used to increase link efficiency. Extensive on-orbit experiments were used to validate our design. The experimental results show that our networked ground station has excellent performance in data transmission capability. Finally, the specific cloud-computing-based ground station network successfully completes our satellite mission.}, } @article {pmid35591112, year = {2022}, author = {Zhang, J and Li, M and Zheng, X and Hsu, CH}, title = {A Time-Driven Cloudlet Placement Strategy for Workflow Applications in Wireless Metropolitan Area Networks.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {9}, pages = {}, pmid = {35591112}, issn = {1424-8220}, support = {2020B0101090005//the Key-Area Research and Development Program of Guangdong Province under Grant/ ; }, mesh = {*Algorithms ; *Cloud Computing ; Computers ; Computers, Handheld ; Workflow ; }, abstract = {With the rapid development of mobile technology, mobile applications have increasing requirements for computational resources, and mobile devices can no longer meet these requirements. Mobile edge computing (MEC) has emerged in this context and has brought innovation into the working mode of traditional cloud computing. By provisioning edge server placement, the computing power of the cloud center is distributed to the edge of the network.
The abundant computational resources of edge servers compensate for the limited resources of mobile devices and shorten the communication delay between servers and users. Constituting a specific form of edge servers, cloudlets have been widely studied within academia and industry in recent years. However, existing studies have mainly focused on computation offloading for general computing tasks under fixed cloudlet placement positions. They ignored the impact of cloudlet placement positions, and of data dependencies among mobile application components, on computation offloading results. In this paper, we study the cloudlet placement problem based on workflow applications (WAs) in wireless metropolitan area networks (WMANs). We devise a cloudlet placement strategy based on a particle swarm optimization algorithm using genetic algorithm operators with the encoding library updating mode (PGEL), which enables the cloudlets to be placed in appropriate positions. The simulation results show that the proposed strategy can obtain a near-optimal cloudlet placement scheme. Compared with other classic algorithms, this algorithm can reduce the execution time of WAs by 15.04-44.99%.}, } @article {pmid35591011, year = {2022}, author = {Barbeau, M and Garcia-Alfaro, J and Kranakis, E}, title = {Research Trends in Collaborative Drones.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {9}, pages = {}, pmid = {35591011}, issn = {1424-8220}, mesh = {Forecasting ; *Technology ; *Unmanned Aerial Devices ; }, abstract = {The last decade has seen an explosion of interest in drones, introducing new networking technologies such as 5G wireless connectivity and cloud computing. The resulting advancements in communication capabilities are already expanding the ubiquitous role of drones as primary solution enablers, from search and rescue missions to information gathering and parcel delivery. Their numerous applications encompass all aspects of everyday life. Our focus is on networked and collaborative drones. The available research literature on this topic is vast. No single survey article could do justice to all critical issues. Our goal in this article is not to cover everything and include everybody but rather to offer a personal perspective on a few selected research topics that might lead to fruitful future investigations that could play an essential role in developing drone technologies. The topics we address include distributed computing with drones for the management of anonymity, countering threats posed by drones, target recognition, navigation under uncertainty, risk avoidance, and cellular technologies. Our approach is selective. Every topic includes an explanation of the problem, a discussion of a potential research methodology, and ideas for future research.}, } @article {pmid35586098, year = {2022}, author = {Li, T and Zhao, H and Tao, Y and Huang, D and Yang, C and Xu, S}, title = {Power Intelligent Terminal Intrusion Detection Based on Deep Learning and Cloud Computing.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {1415713}, pmid = {35586098}, issn = {1687-5273}, mesh = {*Cloud Computing ; Computer Security ; Data Collection ; *Deep Learning ; Information Storage and Retrieval ; }, abstract = {Numerous internal and external intrusion attacks have appeared one after another, which has become a major problem affecting the normal operation of the power system.
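The PGEL strategy in the cloudlet placement entry (pmid35591112) above searches candidate placements with particle swarm optimization; a bare-bones swarm loop of that general kind is sketched below, omitting the genetic operators and encoding-library update. The candidate sites, the stand-in objective, and all constants are assumptions for illustration.

```python
# Bare-bones PSO for picking K cloudlet sites out of N candidates, a sketch of
# the kind of search the PGEL entry above performs. latency() is a stand-in
# objective; a real study would model workflow execution time.
import random

N_SITES, K, SWARM, ITERS = 20, 3, 10, 50

def latency(placement):                        # placement: K chosen site indices
    return sum(placement) / len(placement)     # toy objective; lower is better

def decode(x):                                 # keep the K sites with largest keys
    return sorted(range(N_SITES), key=lambda i: -x[i])[:K]

swarm = [[random.random() for _ in range(N_SITES)] for _ in range(SWARM)]
vel = [[0.0] * N_SITES for _ in range(SWARM)]
pbest = [list(p) for p in swarm]
gbest = list(min(swarm, key=lambda p: latency(decode(p))))

for _ in range(ITERS):
    for i, p in enumerate(swarm):
        for d in range(N_SITES):               # standard inertia + cognitive + social terms
            vel[i][d] = (0.7 * vel[i][d]
                         + 1.4 * random.random() * (pbest[i][d] - p[d])
                         + 1.4 * random.random() * (gbest[d] - p[d]))
            p[d] += vel[i][d]
        if latency(decode(p)) < latency(decode(pbest[i])):
            pbest[i] = list(p)
        if latency(decode(p)) < latency(decode(gbest)):
            gbest = list(p)

print("chosen sites:", decode(gbest))
```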
The power system is part of the infrastructure of the national economy, and ensuring the information security of its network is not only an aspect of computer information security but must also meet high-standard security requirements. This paper analyzes the intrusion threats faced by the power information network and conducts in-depth research and investigation combined with intrusion detection technology for the power information network. It analyzes the structure of the power information network and cloud computing through deep learning-based methods and provides a network intrusion detection model. The model combines the methods of misuse detection and anomaly detection, which solves the problem that misuse analysis models do not detect new attack variants. At the same time, for big data network retrieval, it retrieves and analyzes data flows quickly and accurately with the help of deep learning on data components. It uses a fuzzy integral method to optimize the accuracy of power information network intrusion prediction; the accuracy reaches 98.11%, an increase of 0.6%.}, } @article {pmid35585733, year = {2022}, author = {Aloraini, T and Aljouie, A and Alniwaider, R and Alharbi, W and Alsubaie, L and AlTuraif, W and Qureshi, W and Alswaid, A and Eyiad, W and Al Mutairi, F and Ababneh, F and Alfadhel, M and Alfares, A}, title = {The variant artificial intelligence easy scoring (VARIES) system.}, journal = {Computers in biology and medicine}, volume = {145}, number = {}, pages = {105492}, doi = {10.1016/j.compbiomed.2022.105492}, pmid = {35585733}, issn = {1879-0534}, mesh = {*Artificial Intelligence ; Humans ; *Machine Learning ; Software ; }, abstract = {PURPOSE: Medical artificial intelligence (MAI) is artificial intelligence (AI) applied to the healthcare field. AI can be applied to many different aspects of genetics, such as variant classification. With little or no prior experience in AI coding, we share our experience with variant classification using the Variant Artificial Intelligence Easy Scoring (VARIES) system, an open-access platform, and the Automated Machine Learning (AutoML) service of the Google Cloud Platform.

METHODS: We investigated exome sequencing data from a sample of 1410 individuals. The majority (80%) were used for training and 20% for testing. The user-friendly Google Cloud Platform was used to create the VARIES model, and the TRIPOD checklist was used to develop and validate the prediction model of the VARIES system.

RESULTS: The learning rate of the training dataset reached optimal results at an early stage of iteration, with a loss value near zero in approximately 4 min. For the testing dataset, the F1 (micro average) was 0.64, the F1 (macro average) 0.34, the micro-average area under the curve (AUC, one-vs-rest) 0.81, and the macro-average AUC (one-vs-rest) 0.73. The overall performance characteristics of the VARIES model suggest the classifier has a high predictive ability.
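For readers reproducing such numbers, the micro/macro F1 and one-vs-rest AUC reported above are standard multi-class computations; the sketch below shows them with scikit-learn on random stand-in data, not the VARIES dataset.

```python
# Micro/macro F1 and one-vs-rest AUC for a multi-class classifier, the metric
# family reported in the RESULTS above. Labels and probabilities are random
# stand-ins; real use would substitute model outputs on a held-out test set.
import numpy as np
from sklearn.metrics import f1_score, roc_auc_score
from sklearn.preprocessing import label_binarize

rng = np.random.default_rng(0)
y_true = rng.integers(0, 3, size=200)           # 3 hypothetical variant classes
y_prob = rng.dirichlet(np.ones(3), size=200)    # predicted class probabilities
y_pred = y_prob.argmax(axis=1)                  # hard predictions for F1

print("F1 (micro):", f1_score(y_true, y_pred, average="micro"))
print("F1 (macro):", f1_score(y_true, y_pred, average="macro"))

# One-vs-rest AUC: binarize the labels, then average over the per-class curves.
y_bin = label_binarize(y_true, classes=[0, 1, 2])
print("AUC (micro, one-vs-rest):", roc_auc_score(y_bin, y_prob, average="micro"))
print("AUC (macro, one-vs-rest):", roc_auc_score(y_bin, y_prob, average="macro"))
```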

CONCLUSION: We present a systematic guideline to create a genomic AI prediction tool with high predictive power, using a graphical user interface provided by Google Cloud Platform, with no prior experience in creating the software programs required.}, } @article {pmid35580808, year = {2022}, author = {Wallace, G and Polcyn, S and Brooks, PP and Mennen, AC and Zhao, K and Scotti, PS and Michelmann, S and Li, K and Turk-Browne, NB and Cohen, JD and Norman, KA}, title = {RT-Cloud: A cloud-based software framework to simplify and standardize real-time fMRI.}, journal = {NeuroImage}, volume = {257}, number = {}, pages = {119295}, pmid = {35580808}, issn = {1095-9572}, support = {RF1 MH125318/MH/NIMH NIH HHS/United States ; UL1 TR001863/TR/NCATS NIH HHS/United States ; }, mesh = {*Cloud Computing ; Humans ; Magnetic Resonance Imaging ; *Neurofeedback ; Software ; }, abstract = {Real-time fMRI (RT-fMRI) neurofeedback has been shown to be effective in treating neuropsychiatric disorders and holds tremendous promise for future breakthroughs, both with regard to basic science and clinical applications. However, the prevalence of its use has been hampered by computing hardware requirements, the complexity of setting up and running an experiment, and a lack of standards that would foster collaboration. To address these issues, we have developed RT-Cloud (https://github.com/brainiak/rt-cloud), a flexible, cloud-based, open-source Python software package for the execution of RT-fMRI experiments. RT-Cloud uses standardized data formats and adaptable processing streams to support and expand open science in RT-fMRI research and applications. Cloud computing is a key enabling technology for advancing RT-fMRI because it eliminates the need for on-premise technical expertise and high-performance computing; this allows installation, configuration, and maintenance to be automated and done remotely. Furthermore, the scalability of cloud computing makes it easier to deploy computationally-demanding multivariate analyses in real time. In this paper, we describe how RT-Cloud has been integrated with open standards, including the Brain Imaging Data Structure (BIDS) standard and the OpenNeuro database, how it has been applied thus far, and our plans for further development and deployment of RT-Cloud in the coming years.}, } @article {pmid35578669, year = {2022}, author = {Ahmad, S and Mehfuz, S and Mebarek-Oudina, F and Beg, J}, title = {RSM analysis based cloud access security broker: a systematic literature review.}, journal = {Cluster computing}, volume = {25}, number = {5}, pages = {3733-3763}, pmid = {35578669}, issn = {1386-7857}, abstract = {A Cloud Access Security Broker (CASB) is a security enforcement point or cloud-based software, placed between cloud service users and the cloud applications of cloud computing (CC), that is used to manage the dimensionality, heterogeneity, and ambiguity associated with cloud services. CASBs permit organizations to extend the reach of their security policies beyond their own infrastructure to third-party software and storage. In contrast to other systematic literature reviews (SLR), this one is directed at the client setting. To identify and evaluate methods to understand CASBs, the SLR discusses the literature, providing a comprehension of the state of the art and an innovative characterization to describe them. An SLR was performed to compile CASB-related studies and analyze how CASBs are designed and formed.
These studies are then analyzed from different contexts, such as motivation, usefulness, building approach, and decision method. The SLR discusses the contrasts between the studies and their implementations; engineering efforts were directed at combinations of "market-based solutions", "middlewares", "toolkits", "algorithms", "semantic frameworks", and "conceptual frameworks", with notable disparities in the studies' implementations. Search strings with keywords extracted from the Research Questions (RQs) were utilized to identify the essential studies from journal papers, conference papers, workshops, and symposiums. This SLR identified 20 particular studies published from 2011 to 2021. The chosen studies were evaluated according to the defined RQs for their quality and scope with respect to CASB, in this way recognizing a few gaps within the literature. For further understanding, the different independent parameters influencing CASB were studied using PCA (Principal Component Analysis); the outcome of this analysis was the identification of five influential parameters. The experimental results were then used as input for Response Surface Methodology (RSM) to obtain an empirical model; for this, five-level coding was employed to develop the model, considering three dependent parameters and four center values. It was observed from the CCD (Central Composite Design) model that the actual values show significant influence, with R[2] = 0.90. This wide investigation reveals that CASB is still in a formative state. Even though vital advancement has been carried out in this area, obvious challenges remain to be addressed, which have been highlighted in this paper.}, } @article {pmid35577816, year = {2022}, author = {Wimberly, MC and Nekorchuk, DM and Kankanala, RR}, title = {Cloud-based applications for accessing satellite Earth observations to support malaria early warning.}, journal = {Scientific data}, volume = {9}, number = {1}, pages = {208}, pmid = {35577816}, issn = {2052-4463}, support = {R01AI079411//U.S. Department of Health & Human Services | NIH | National Institute of Allergy and Infectious Diseases (NIAID)/ ; R01AI079411//U.S. Department of Health & Human Services | NIH | National Institute of Allergy and Infectious Diseases (NIAID)/ ; R01AI079411//U.S.
Department of Health & Human Services | NIH | National Institute of Allergy and Infectious Diseases (NIAID)/ ; }, mesh = {Animals ; Climate ; Cloud Computing ; Earth, Planet ; Ethiopia/epidemiology ; *Malaria/prevention & control ; *Software ; }, abstract = {Malaria epidemics can be triggered by fluctuations in temperature and precipitation that influence vector mosquitoes and the malaria parasite. Identifying and monitoring environmental risk factors can thus provide early warning of future outbreaks. Satellite Earth observations provide relevant measurements, but obtaining these data requires substantial expertise, computational resources, and internet bandwidth. To support malaria forecasting in Ethiopia, we developed software for Retrieving Environmental Analytics for Climate and Health (REACH). REACH is a cloud-based application for accessing data on land surface temperature, spectral indices, and precipitation using the Google Earth Engine (GEE) platform. REACH can be implemented using the GEE code editor and JavaScript API, as a standalone web app, or as a package with the Python API. Users provide a date range, and data for 852 districts in Ethiopia are automatically summarized and downloaded as tables. REACH was successfully used in Ethiopia to support a pilot malaria early warning project in the Amhara region. The software can be extended to new locations and modified to access other environmental datasets through GEE.}, } @article {pmid35571870, year = {2022}, author = {Rahman, MM and Khatun, F and Sami, SI and Uzzaman, A}, title = {The evolving roles and impacts of 5G enabled technologies in healthcare: The world epidemic COVID-19 issues.}, journal = {Array (New York, N.Y.)}, volume = {14}, number = {}, pages = {100178}, pmid = {35571870}, issn = {2590-0056}, abstract = {The latest 5G technology is being introduced in the Internet of Things (IoT) era. The study aims to focus on 5G technology and current healthcare challenges, as well as to highlight 5G-based solutions that can handle the COVID-19 issues in different arenas. This paper provides a comprehensive review of 5G technology with the integration of other digital technologies (like AI and machine learning, IoT objects, big data analytics, cloud computing, robotic technology, and other digital platforms) in emerging healthcare applications. From the literature, it is clear that the promising aspects of 5G (such as super-high speed, high throughput, and low latency) have prospects for healthcare advancement. Healthcare is now adopting 5G-based technologies to aid improved health services, more effective medical research, enhanced quality of life, and better experiences for medical professionals and patients, anywhere and anytime. This paper emphasizes the evolving roles of 5G technology in handling epidemiological challenges. The study also discusses various technological challenges and prospects for developing 5G-powered healthcare solutions.
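In the spirit of the REACH entry (pmid35577816) above, a minimal Google Earth Engine Python sketch for summarizing land surface temperature over districts might look as follows. The district asset path is a placeholder, and authentication/initialization details vary by environment; this is not REACH itself, which ships its own packaged workflows.

```python
# Minimal Earth Engine sketch: mean MODIS land surface temperature per district
# for a date range, in the style of the REACH entry above. The FeatureCollection
# asset path is hypothetical; ee.Authenticate() may be required on first use.
import ee

ee.Initialize()  # newer API versions may require a project argument

districts = ee.FeatureCollection("users/example/ethiopia_districts")  # placeholder
lst = (ee.ImageCollection("MODIS/061/MOD11A2")
       .filterDate("2020-01-01", "2020-02-01")
       .select("LST_Day_1km")
       .mean()
       .multiply(0.02).subtract(273.15))        # apply scale factor, Kelvin -> Celsius

summary = lst.reduceRegions(collection=districts,
                            reducer=ee.Reducer.mean(),
                            scale=1000)
print(summary.first().getInfo())                # one district's mean LST
```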
Future work will incorporate more studies on how to expand the 5G-based digital society, as well as on how to resolve the issues of safety, security, and privacy and of availability, accessibility, and integrity in future health crises.}, } @article {pmid35566391, year = {2022}, author = {Tang, S and Chen, R and Lin, M and Lin, Q and Zhu, Y and Ding, J and Hu, H and Ling, M and Wu, J}, title = {Accelerating AutoDock Vina with GPUs.}, journal = {Molecules (Basel, Switzerland)}, volume = {27}, number = {9}, pages = {}, pmid = {35566391}, issn = {1420-3049}, support = {61872198//National Natural Science Foundation of China/ ; 81771478//National Natural Science Foundation of China/ ; 61971216//National Natural Science Foundation of China/ ; BK20201378//Basic Research Program of Science and Technology Department of Jiangsu Province/ ; }, mesh = {Algorithms ; *Drug Discovery ; Ligands ; Molecular Docking Simulation ; *Software ; }, abstract = {AutoDock Vina is one of the most popular molecular docking tools. In the latest benchmark CASF-2016 for comparative assessment of scoring functions, AutoDock Vina won the best docking power among all the docking tools. Modern drug discovery faces a common scenario of large virtual screening of drug hits from huge compound databases. Due to the serial nature of the AutoDock Vina algorithm, there has been no successful report of its parallel acceleration with GPUs. Current acceleration of AutoDock Vina typically relies on stacking computing power and allocating resources and tasks, as in the VirtualFlow platform. The vast resource expenditure and the high access threshold for users greatly limit the popularity of AutoDock Vina and the flexibility of its usage in modern drug discovery. In this work, we propose a new method, Vina-GPU, for accelerating AutoDock Vina with GPUs, which is greatly needed for reducing the investment required for large virtual screens and for wider application of large-scale virtual screening on personal computers, workstation servers, or cloud computing, etc. Our proposed method is based on a modified Monte Carlo scheme using a simulated annealing algorithm. It greatly raises the number of initial random conformations and reduces the search depth of each thread. Moreover, a classic optimizer named BFGS is adopted to optimize the ligand conformations during the docking process, and a heterogeneous OpenCL implementation was developed to realize parallel acceleration leveraging thousands of GPU cores. Large benchmark tests show that Vina-GPU reaches an average of 21-fold and a maximum of 50-fold docking acceleration over the original AutoDock Vina while ensuring comparable docking accuracy, indicating its potential for pushing the popularization of AutoDock Vina in large virtual screens.}, } @article {pmid35558165, year = {2022}, author = {Porter, SJ and Hook, DW}, title = {Connecting Scientometrics: Dimensions as a Route to Broadening Context for Analyses.}, journal = {Frontiers in research metrics and analytics}, volume = {7}, number = {}, pages = {835139}, pmid = {35558165}, issn = {2504-0537}, abstract = {Modern cloud-based data infrastructures open new vistas for the deployment of scientometric data into the hands of practitioners. These infrastructures lower barriers to entry by making data more available and compute capacity more affordable. In addition, if data are prepared appropriately, with unique identifiers, it is possible to connect many different types of data.
Bringing broader world data into the hands of practitioners (policymakers, strategists, and others) who use scientometrics as a tool can extend their capabilities. These ideas are explored through connecting Dimensions and World Bank data on Google BigQuery to study international collaboration between countries of different economic classification.}, } @article {pmid35552142, year = {2023}, author = {Luo, C and Wang, S and Li, T and Chen, H and Lv, J and Yi, Z}, title = {Large-Scale Meta-Heuristic Feature Selection Based on BPSO Assisted Rough Hypercuboid Approach.}, journal = {IEEE transactions on neural networks and learning systems}, volume = {34}, number = {12}, pages = {10889-10903}, doi = {10.1109/TNNLS.2022.3171614}, pmid = {35552142}, issn = {2162-2388}, abstract = {The selection of prominent features for building more compact and efficient models is an important data preprocessing task in the field of data mining. The rough hypercuboid approach is an emerging technique that can be applied to eliminate irrelevant and redundant features, especially for the inexactness problem in approximate numerical classification. By integrating the meta-heuristic-based evolutionary search technique, a novel global search method for numerical feature selection is proposed in this article based on the hybridization of the rough hypercuboid approach and binary particle swarm optimization (BPSO) algorithm, namely RH-BPSO. To further alleviate the issue of high computational cost when processing large-scale datasets, parallelization approaches for calculating the hybrid feature evaluation criteria are presented by decomposing and recombining hypercuboid equivalence partition matrix via horizontal data partitioning. A distributed meta-heuristic optimized rough hypercuboid feature selection (DiRH-BPSO) algorithm is thus developed and embedded in the Apache Spark cloud computing model. Extensive experimental results indicate that RH-BPSO is promising and can significantly outperform the other representative feature selection algorithms in terms of classification accuracy, the cardinality of the selected feature subset, and execution efficiency. Moreover, experiments on distributed-memory multicore clusters show that DiRH-BPSO is significantly faster than its sequential counterpart and is perfectly capable of completing large-scale feature selection tasks that fail on a single node due to memory constraints. Parallel scalability and extensibility analysis also demonstrate that DiRH-BPSO could scale out and extend well with the growth of computational nodes and the volume of data.}, } @article {pmid35548309, year = {2022}, author = {Jiang, F and Deng, M and Long, Y and Sun, H}, title = {Spatial Pattern and Dynamic Change of Vegetation Greenness From 2001 to 2020 in Tibet, China.}, journal = {Frontiers in plant science}, volume = {13}, number = {}, pages = {892625}, pmid = {35548309}, issn = {1664-462X}, abstract = {Due to the cold climate and dramatically undulating altitude, the identification of dynamic vegetation trends and main drivers is essential to maintain the ecological balance in Tibet. The normalized difference vegetation index (NDVI), as the most commonly used greenness index, can effectively evaluate vegetation health and spatial patterns. MODIS-NDVI (Moderate-resolution Imaging Spectroradiometer-NDVI) data for Tibet from 2001 to 2020 were obtained and preprocessed on the Google Earth Engine (GEE) cloud platform. 
The Theil-Sen median method and Mann-Kendall test method were employed to investigate dynamic NDVI changes, and the Hurst exponent was used to predict future vegetation trends. In addition, the main drivers of NDVI changes were analyzed. The results indicated that (1) the vegetation NDVI in Tibet significantly increased from 2001 to 2020, and the annual average NDVI value fluctuated between 0.31 and 0.34 at an increase rate of 0.0007 year[-1]; (2) the vegetation improvement area accounted for the largest share of the study area at 56.6%, followed by stable unchanged and degraded areas, with proportions of 27.5 and 15.9%, respectively, while the overall variation coefficient of the NDVI in Tibet was low, with a mean value of 0.13; (3) the mean value of the Hurst exponent was 0.53, and the area of continuously improving regions accounted for 41.2% of the study area, indicating that the vegetation change trend was continuous in most areas; (4) the NDVI in Tibet indicated a high degree of spatial agglomeration, although there existed obvious differences in the spatial distribution of NDVI aggregation areas, and the aggregation types mainly included the high-high and low-low types; and (5) precipitation and population growth significantly contributed to vegetation cover improvement in western Tibet. In addition, the use of the GEE to obtain remote sensing data combined with time-series data analysis provides the potential to quickly obtain large-scale vegetation change trends.}, } @article {pmid35535371, year = {2022}, author = {Lee, SH and Park, J and Yang, K and Min, J and Choi, J}, title = {Accuracy of Cloud-Based Speech Recognition Open Application Programming Interface for Medical Terms of Korean.}, journal = {Journal of Korean medical science}, volume = {37}, number = {18}, pages = {e144}, pmid = {35535371}, issn = {1598-6357}, mesh = {Cloud Computing ; Communication ; Humans ; Software ; *Speech ; *Speech Perception ; }, abstract = {BACKGROUND: There are limited data on the accuracy of cloud-based speech recognition (SR) open application programming interfaces (APIs) for medical terminology. This study aimed to evaluate the medical term recognition accuracy of currently available cloud-based SR open APIs in Korean.
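The per-pixel trend analysis in the Tibet NDVI entry (pmid35548309) above combines a Theil-Sen slope with a Mann-Kendall test; for a single series it reduces to the sketch below, where the NDVI values are synthetic and Kendall's tau is used as a stand-in for the full Mann-Kendall procedure.

```python
# Theil-Sen slope plus a Mann-Kendall-style significance check for one
# synthetic NDVI time series, the per-pixel computation behind the Tibet
# NDVI entry above. Real use would loop this over every pixel in a stack.
import numpy as np
from scipy.stats import theilslopes, kendalltau

years = np.arange(2001, 2021)
ndvi = (0.31 + 0.0007 * (years - 2001)
        + np.random.default_rng(1).normal(0, 0.01, years.size))  # fabricated series

slope, intercept, lo, hi = theilslopes(ndvi, years)   # median of pairwise slopes
tau, p_value = kendalltau(years, ndvi)                # monotonic-trend test
print(f"Theil-Sen slope: {slope:.5f}/yr (95% CI {lo:.5f}..{hi:.5f}), "
      f"tau={tau:.2f}, p={p_value:.3f}")
```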

METHODS: We analyzed the SR accuracy of currently available cloud-based SR open APIs using real doctor-patient conversation recordings collected from an outpatient clinic at a large tertiary medical center in Korea. For each original and SR transcription, we analyzed the accuracy rate of each cloud-based SR open API (i.e., the number of medical terms in the SR transcription per number of medical terms in the original transcription).

RESULTS: A total of 112 doctor-patient conversation recordings were converted with three cloud-based SR open APIs (Naver Clova SR from Naver Corporation; Google Speech-to-Text from Alphabet Inc.; and Amazon Transcribe from Amazon), and each transcription was compared. Naver Clova SR (75.1%) showed the highest accuracy with the recognition of medical terms compared to the other open APIs (Google Speech-to-Text, 50.9%, P < 0.001; Amazon Transcribe, 57.9%, P < 0.001), and Amazon Transcribe demonstrated higher recognition accuracy compared to Google Speech-to-Text (P < 0.001). In the sub-analysis, Naver Clova SR showed the highest accuracy in all areas according to word classes, but the accuracy of words longer than five characters showed no statistical differences (Naver Clova SR, 52.6%; Google Speech-to-Text, 56.3%; Amazon Transcribe, 36.6%).
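The accuracy rate defined in the METHODS above (medical terms recognized in the SR transcription per terms in the original transcription) reduces to a simple ratio; the sketch below shows it with invented term lists.

```python
# Medical-term accuracy as defined in the METHODS above: the share of original
# terms that appear in the SR transcription. The term list and transcript are
# invented; real evaluation would also need normalization and term matching rules.
def term_accuracy(original_terms, sr_transcript):
    recognized = sum(1 for term in original_terms if term in sr_transcript)
    return recognized / len(original_terms)

original = ["hypertension", "metformin", "echocardiogram"]
transcript = "patient with hypertension taking metformin daily"
print(f"{term_accuracy(original, transcript):.1%}")   # 66.7%
```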

CONCLUSION: Among the three current cloud-based SR open APIs, Naver Clova SR, which is manufactured by a Korean company, showed the highest accuracy for medical terms in Korean, compared to Google Speech-to-Text and Amazon Transcribe. Although limitations exist in the recognition of medical terminology, there is ample room for improving this promising technology by combining the strengths of each SR engine.}, } @article {pmid35535196, year = {2022}, author = {Chai, M}, title = {Design of Rural Human Resource Management Platform Integrating IoT and Cloud Computing.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {4133048}, pmid = {35535196}, issn = {1687-5273}, mesh = {*Cloud Computing ; Humans ; *Technology ; Workforce ; }, abstract = {With the advent of the Internet of Things era, hot technologies such as distributed and parallel computing, network storage, and load balancing provide a good application foundation for the Internet of Things, making real-time dynamic management and intelligent analysis of hundreds of millions of items in the Internet of Things possible. The Internet of Things has changed from a concept to a reality, quickly reaching every corner of society. On the other hand, with the increasing mobility of the workforce, file management at talent service centers is becoming more and more difficult. The traditional management methods for human resources files have problems such as poor resource sharing, asymmetric resources, and heterogeneous information sharing, and they can no longer meet the needs of both the supply and demand sides of human resources with diversified and multiple organizational structures. Cloud computing technology has powerful data collection, self-service, and unified resource scheduling functions, and introducing it into the human resources file management system can greatly improve management efficiency. In order to carry out information management of rural human resources, this paper develops a rural human resources management system based on the Internet of Things. This paper introduces the design scheme of a rural human resource management platform based on Internet of Things and cloud computing technology. The design of this system mainly includes organization setting, post planning, personnel management, salary management, insurance benefits, recruitment and selection, training management, performance appraisal management, labor contract management, comprehensive inquiry, rules and regulations, employee self-help, system setting, and system management function modules. The research results show that the rural human resource management system based on cloud computing can provide a complete human resource management solution for vast rural areas. Users need only purchase services, saving substantial development and maintenance costs, and can also customize functions to better meet their needs.}, } @article {pmid35531323, year = {2022}, author = {Munjal, K and Bhatia, R}, title = {A systematic review of homomorphic encryption and its contributions in healthcare industry.}, journal = {Complex & intelligent systems}, volume = {}, number = {}, pages = {1-28}, pmid = {35531323}, issn = {2198-6053}, abstract = {Cloud computing and cloud storage have contributed to a big shift in data processing and its use. Availability and accessibility of resources, with a reduction of substantial work, is one of the main reasons for the cloud revolution.
With this cloud computing revolution, outsourcing applications are in great demand. Clients use the service by uploading their data to the cloud and finally obtain the result after it is processed. This benefits users greatly, but it also exposes sensitive data to third-party service providers. In the healthcare industry, patient health records are digital records of a patient's medical history kept by hospitals or health care providers. Patient health records are stored in data centers for storage and processing. Before doing computations on data, traditional encryption techniques decrypt the data into their original form; as a result, the confidentiality of sensitive medical information is lost. Homomorphic encryption can protect sensitive information by allowing data to be processed in an encrypted form such that only encrypted data is accessible to service providers. In this paper, an attempt is made to present a systematic review of homomorphic cryptosystems with their categorization and evolution over time. In addition, this paper also includes a review of homomorphic cryptosystem contributions in healthcare.}, } @article {pmid35530181, year = {2022}, author = {Kumar, V and Mahmoud, MS and Alkhayyat, A and Srinivas, J and Ahmad, M and Kumari, A}, title = {RAPCHI: Robust authentication protocol for IoMT-based cloud-healthcare infrastructure.}, journal = {The Journal of supercomputing}, volume = {78}, number = {14}, pages = {16167-16196}, pmid = {35530181}, issn = {0920-8542}, abstract = {With the fast growth of technologies like cloud computing, big data, the Internet of Things, artificial intelligence, and cyber-physical systems, the demand for data security and privacy in communication networks is growing by the day. Patients and doctors connect securely through the Internet utilizing Internet of medical things devices in cloud-healthcare infrastructure (CHI). In addition, doctors offer patients online treatment. Unfortunately, hackers are gaining access to data at an alarming pace. In 2019, healthcare systems were compromised by attackers 41.4 million times. In this context, we provide a secure and lightweight authentication scheme (RAPCHI) for CHI employing the Internet of Medical Things (IoMT) during a pandemic, based on cryptographic primitives. The suggested framework is more secure than existing frameworks and is resistant to a wide range of security threats. The paper also explains the random oracle model (ROM) and uses two alternative approaches to validate the formal security analysis of RAPCHI. Further, the paper shows that RAPCHI is safe against man-in-the-middle and replay attacks using the simulation programme AVISPA. In addition, the paper compares RAPCHI to related frameworks and discovers that it is relatively light in terms of computation and communication. These findings demonstrate that the proposed paradigm is suitable for use in real-world scenarios.}, } @article {pmid35528357, year = {2022}, author = {Gao, J}, title = {Network Intrusion Detection Method Combining CNN and BiLSTM in Cloud Computing Environment.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {7272479}, pmid = {35528357}, issn = {1687-5273}, mesh = {Algorithms ; *Cloud Computing ; *Neural Networks, Computer ; }, abstract = {A network intrusion detection method combining a CNN and a BiLSTM network is proposed. First, the KDD CUP 99 data set is preprocessed using a data extraction algorithm.
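The additive property surveyed in the homomorphic encryption review (pmid35531323) above can be demonstrated in a few lines with the third-party python-paillier package; the package choice and the toy patient data are assumptions of this sketch, not anything from the review.

```python
# Additive homomorphism in one screen: a server can sum ciphertexts without
# ever seeing the plaintexts, the property the review above surveys. Uses the
# third-party "phe" (python-paillier) package; the readings are hypothetical.
from phe import paillier

public_key, private_key = paillier.generate_paillier_keypair()
bp_readings = [118, 124, 131]                         # hypothetical patient data
encrypted = [public_key.encrypt(x) for x in bp_readings]

# Ciphertext addition corresponds to plaintext addition under Paillier.
encrypted_sum = encrypted[0] + encrypted[1] + encrypted[2]
assert private_key.decrypt(encrypted_sum) == sum(bp_readings)
print("decrypted sum:", private_key.decrypt(encrypted_sum))
```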
The data set is transformed into an image data set by data cleaning, data extraction, and data mapping. Second, a CNN is used to extract parallel local features of the attribute information, and a BiLSTM is used to extract features of long-distance-dependent information, so as to fully consider the interactions between preceding and following attribute information; an attention mechanism is introduced to improve classification accuracy. Finally, a C5.0 decision tree and the CNN-BiLSTM deep learning model are combined to skip manual feature selection and directly use the deep learning model to learn the representational features of high-dimensional data. Experimental results show that, compared with methods based on AE-AlexNet and SGM-CNN, the network intrusion detection effect of this method is better: the average accuracy can be improved to 95.50%, the false-positive rate can be reduced to 4.24%, and the false-negative rate can be reduced to 6.66%. The proposed method can significantly improve the performance of network intrusion detection systems.}, } @article {pmid35528215, year = {2023}, author = {Ahmed, K and Saini, M}, title = {FCML-gait: fog computing and machine learning inspired human identity and gender recognition using gait sequences.}, journal = {Signal, image and video processing}, volume = {17}, number = {4}, pages = {925-936}, pmid = {35528215}, issn = {1863-1703}, abstract = {Security threats are always present if human intruders are not identified and recognized in good time in highly security-sensitive environments like the military, airports, parliament houses, and banks. Fog computing and machine learning algorithms applied to gait sequences can prove better at restricting intruders promptly. Gait recognition provides the ability to observe an individual unobtrusively, without any direct cooperation or interaction from the person, making it more attractive than other biometric recognition techniques. In this paper, Fog Computing and Machine Learning Inspired Human Identity and Gender Recognition using Gait Sequences (FCML-Gait) is proposed. Internet of Things (IoT) devices and video capturing sensors are used to acquire data. Frames are clustered using the affinity propagation (AP) clustering technique into several clusters, and the cluster-based averaged gait image (C-AGI) feature is determined for each cluster. For training and testing of datasets, sparse reconstruction-based metric learning (SRML) and Speeded Up Robust Features (SURF) with a support vector machine (SVM) are applied on the benchmark gait database ADSC-AWD, having 80 subjects of 20 different individuals, in the Fog Layer to improve the processing.
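A skeletal model of the CNN-plus-BiLSTM shape described in the intrusion detection entry (pmid35528357) above is sketched below; the input dimensions and layer sizes are assumptions, and the attention mechanism and C5.0 stage are omitted for brevity.

```python
# Skeletal CNN + BiLSTM classifier of the general shape described in the
# intrusion detection entry above. Layer sizes are illustrative; the paper's
# attention mechanism and C5.0 decision-tree stage are not reproduced here.
import tensorflow as tf
from tensorflow.keras import layers, models

model = models.Sequential([
    layers.Input(shape=(41, 1)),                 # e.g., the 41 KDD CUP 99 attributes
    layers.Conv1D(64, 3, activation="relu"),     # local features of adjacent attributes
    layers.MaxPooling1D(2),
    layers.Bidirectional(layers.LSTM(64)),       # long-distance dependencies, both directions
    layers.Dense(5, activation="softmax"),       # normal traffic + 4 attack categories
])
model.compile(optimizer="adam",
              loss="sparse_categorical_crossentropy",
              metrics=["accuracy"])
model.summary()
```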
Performance metrics, for instance, accuracy, precision, recall, F-measure, C-time, and R-time, have been measured, and a comparative evaluation of the proposed method with the existing SRML technique has been provided, in which the proposed FCML-Gait outperforms it and attains the highest accuracy of 95.49%.}, } @article {pmid35528159, year = {2022}, author = {Aldahwan, NS and Ramzan, MS}, title = {Quadruple Theories Based Determinants and their Causal Relationships Affecting the Adoption of Community Cloud in Saudi HEI.}, journal = {BioMed research international}, volume = {2022}, number = {}, pages = {2382535}, pmid = {35528159}, issn = {2314-6141}, mesh = {*Cloud Computing ; Reproducibility of Results ; Saudi Arabia ; *Technology ; }, abstract = {Higher education institutions (HEIs) are rapidly adopting modern cloud computing because of its cost-effectiveness and its efficient, productive features. Though cloud computing technology is beneficial to the educational sector, it is important to assess its economic benefits; its technical, organizational, and environmental appropriateness; and potential obstacles before adopting the new technology. There are four evaluation theories for the adoption of cloud computing technology: the Technology Organization Environment (TOE), the Technology Acceptance Model (TAM), the Diffusion of Innovation (DOI), and the Institutional (INT) theory. This study has developed a new adoption framework for accepting cloud computing technology in Saudi HEIs by integrating the above-mentioned theories. This framework is unique because no research has yet been conducted on the adoption of the community cloud at the organizational level considering the four theories simultaneously. This research has developed 25 hypotheses on the adoption of community cloud computing in HEIs and analyzed those hypotheses using SPSS statistical analysis software. The reliability of the data was tested by utilizing composite reliability and the Cronbach's alpha method. This study has introduced an innovative approach and framework to understand the adoption of the community cloud, which will help decision-makers build strategies in their organizations for the effective adoption of community cloud services.}, } @article {pmid35521547, year = {2022}, author = {Elisseev, V and Gardiner, LJ and Krishna, R}, title = {Scalable in-memory processing of omics workflows.}, journal = {Computational and structural biotechnology journal}, volume = {20}, number = {}, pages = {1914-1924}, pmid = {35521547}, issn = {2001-0370}, abstract = {We present a proof-of-concept implementation of the in-memory computing paradigm that we use to facilitate the analysis of metagenomic sequencing reads. In doing so, we compare the performance of POSIX™ file systems and key-value storage for omics data, and we show the potential for integrating high-performance computing (HPC) and cloud native technologies. We show that in-memory key-value storage offers possibilities for improved handling of omics data through more flexible and faster data processing. We envision fully containerized workflows and their deployment in portable micro-pipelines with multiple instances working concurrently with the same distributed in-memory storage. To highlight the potential usage of this technology for event-driven and real-time data processing, we use a biological case study focused on the growing threat of antimicrobial resistance (AMR).
We develop a workflow encompassing bioinformatics and explainable machine learning (ML) to predict the life expectancy of a population based on the microbiome of its sewage, while providing a description of the AMR contribution to the prediction. We propose that in future, performing such analyses in 'real-time' would allow us to assess the potential risk to the population based on changes in the AMR profile of the community.}, } @article {pmid35464181, year = {2021}, author = {Dawood, HM and Liew, CY and Lau, TC}, title = {Mobile perceived trust mediation on the intention and adoption of FinTech innovations using mobile technology: A systematic literature review.}, journal = {F1000Research}, volume = {10}, number = {}, pages = {1252}, pmid = {35464181}, issn = {2046-1402}, mesh = {Artificial Intelligence ; *COVID-19 ; Humans ; *Intention ; Technology ; Trust ; }, abstract = {The banking and financial sectors have witnessed significant development recently due to financial technology (FinTech), and it has become an essential part of the financial system. Many factors helped the development of this sector, including pandemics such as COVID-19, the considerably increasing market value of the FinTech sector worldwide, and new technologies such as blockchain, artificial intelligence, big data, cloud computing, and mobile technology, as well as changes in consumers' preferences, especially among the Z-generation (digital generation). FinTech shifted traditional business models to mobile platforms characterized by ease of access and swift transactions. Mobile technology became the main backbone for FinTech innovations and acts as a channel to deliver FinTech services that overcome all geographical and timing barriers, thus enhancing financial inclusion. Mobile Perceived Trust (MPT), or trust in using financial business models via mobile technology, is a crucial factor in the FinTech context that has mediation effects on the intention and adoption of different FinTech business models. Unfortunately, few studies have explored MPT mediation on consumers' intention to adopt FinTech innovations using mobile technology. Typically, many studies examined trust/MPT as an independent and unidirectional variable and investigated its effects on behavioural intention without predicting its mediation effects. This study aimed to develop a systematic literature review on MPT mediation in FinTech, focusing on the period from 2016 to 2021, on journals ranked Q1 and Q2, and on well-known theories such as the technology acceptance model, the unified theory of acceptance and use of technology, and the mobile technology acceptance model. This study found that only four articles were published in Q1 and Q2 journals; in these articles, MPT was used as a mediator, and its effects on behavioural intention and adoption were measured.}, } @article {pmid35511912, year = {2022}, author = {Kim, YK and Kim, HJ and Lee, H and Chang, JW}, title = {Privacy-preserving parallel kNN classification algorithm using index-based filtering in cloud computing.}, journal = {PloS one}, volume = {17}, number = {5}, pages = {e0267908}, pmid = {35511912}, issn = {1932-6203}, mesh = {Algorithms ; Artificial Intelligence ; *Cloud Computing ; Computer Security ; *Privacy ; }, abstract = {With the development of cloud computing, interest in database outsourcing has recently increased. In cloud computing, it is necessary to protect the sensitive information of data owners and authorized users.
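The explainable-ML step of the sewage/AMR workflow (pmid35521547) above pairs a fitted model with per-feature attributions; the sketch below illustrates that pattern with synthetic data and the third-party shap package, as an assumption about tooling rather than the authors' exact stack.

```python
# Fit a model, then attribute predictions to input features: the explainable
# step of the kind the AMR workflow above describes. Data is synthetic; real
# inputs would be AMR marker abundances derived from sewage metagenomes.
import numpy as np
import shap
from sklearn.ensemble import RandomForestRegressor

rng = np.random.default_rng(0)
X = rng.random((200, 10))                       # e.g., abundance of 10 AMR markers
y = 80 - 15 * X[:, 0] + rng.normal(0, 1, 200)   # life expectancy driven by marker 0

model = RandomForestRegressor(n_estimators=100, random_state=0).fit(X, y)
explainer = shap.TreeExplainer(model)
shap_values = explainer.shap_values(X[:5])      # per-feature contributions per sample
print(shap_values.shape)                        # (5, 10): marker 0 should dominate
```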
For this, data mining techniques over encrypted data have been studied to protect the original database, user queries and data access patterns. The typical data mining technique is kNN classification which is widely used for data analysis and artificial intelligence. However, existing works do not provide a sufficient level of efficiency for a large amount of encrypted data. To solve this problem, in this paper, we propose a privacy-preserving parallel kNN classification algorithm. To reduce the computation cost for encryption, we propose an improved secure protocol by using an encrypted random value pool. To reduce the query processing time, we not only design a parallel algorithm, but also adopt a garbled circuit. In addition, the security analysis of the proposed algorithm is performed to prove its data protection, query protection, and access pattern protection. Through our performance evaluation, the proposed algorithm shows about 2∼25 times better performance compared with existing algorithms.}, } @article {pmid35511843, year = {2022}, author = {Ghosh, A and Saha, R and Misra, S}, title = {Persistent Service Provisioning Framework for IoMT Based Emergency Mobile Healthcare Units.}, journal = {IEEE journal of biomedical and health informatics}, volume = {26}, number = {12}, pages = {5851-5858}, doi = {10.1109/JBHI.2022.3172624}, pmid = {35511843}, issn = {2168-2208}, mesh = {Humans ; *Delivery of Health Care ; Monitoring, Physiologic ; *Internet ; Cloud Computing ; }, abstract = {The resource constrained nature of IoT devices set about task offloading over the Internet for robust processing. However, this increases the Turnaround Time (TAT) of the IoT services. High TATs may cause catastrophe in time-sensitive environments such as chemical and steel industries, vehicular networks, healthcare, and others. Moreover, the unreliable Internet in rural parts of underdeveloped and developing countries is unsuitable for time-critical IoT systems. In this work, we propose a framework for continuous delivery of IoT services to address the issue of high latency/TAT with poor/no-internet coverage. The proposed framework guarantees service delivery in such areas. To demonstrate the proposed framework, we implemented an IoT-based mobile patient monitoring system. It predicts the patient's criticality using actual sensor data. When the sensed parameters exceed the pre-set threshold in the rule-base, it initiates data transfer to the fog or cloud server. If fog or the cloud is unreachable, it performs onboard predictions. Thus, the framework ensures essential service delivery to the user at all times. Our test-bed-based evaluation demonstrates edge CPU and RAM load reduction of 16% and 26%, respectively, in the ML model's test phase. 
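The fog-then-cloud-then-onboard fallback described in the IoMT entry (pmid35511843) above amounts to a small retry ladder; the endpoints, rule-base threshold, and payload format in the sketch below are hypothetical.

```python
# Fallback ladder in the spirit of the IoMT entry above: offload only when a
# rule-base threshold is exceeded, prefer the fog, then the cloud, and fall
# back to an onboard rule when both are unreachable. All names are hypothetical.
import urllib.request

FOG_URL, CLOUD_URL = "http://fog.local/predict", "https://cloud.example/predict"
HEART_RATE_LIMIT = 120                        # pre-set rule-base threshold

def remote_predict(url, payload, timeout=2):
    req = urllib.request.Request(url, data=payload, method="POST")
    with urllib.request.urlopen(req, timeout=timeout) as resp:
        return resp.read()

def handle_reading(heart_rate):
    if heart_rate <= HEART_RATE_LIMIT:        # below threshold: no offload needed
        return "ok"
    payload = str(heart_rate).encode()
    for url in (FOG_URL, CLOUD_URL):          # prefer fog, then cloud
        try:
            return remote_predict(url, payload)
        except OSError:                       # URLError/timeout both subclass OSError
            continue
    return "critical" if heart_rate > 150 else "warning"   # onboard fallback

print(handle_reading(135))
```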
Also, the results confirm continuous service delivery with reduced latency and lower power and computing resource consumption.}, } @article {pmid35511842, year = {2022}, author = {Chen, J and Zheng, Y and Liang, Y and Zhan, Z and Jiang, M and Zhang, X and da Silva, DS and Wu, W and Albuquerque, VHC}, title = {Edge2Analysis: A Novel AIoT Platform for Atrial Fibrillation Recognition and Detection.}, journal = {IEEE journal of biomedical and health informatics}, volume = {26}, number = {12}, pages = {5772-5782}, doi = {10.1109/JBHI.2022.3171918}, pmid = {35511842}, issn = {2168-2208}, mesh = {Humans ; *Atrial Fibrillation/diagnosis ; Artificial Intelligence ; Neural Networks, Computer ; Electrocardiography ; Computer Simulation ; }, abstract = {Atrial fibrillation (AF) is a serious medical condition of the heart potentially leading to stroke, which can be diagnosed by analyzing electrocardiograms (ECG). Technologies of Artificial Intelligence of Things (AIoT) enable smart abnormality detection by analyzing streaming healthcare data from the sensor end of users. Analyzing streaming data in the cloud leads to challenges of response latency and privacy issues, and local inference by a model deployed on the user end brings difficulties in model update and customization. Therefore, we propose an AIoT platform with AF recognition neural networks on the sensor edge with model retraining ability on a resource-constrained embedded system. To this aim, we propose combining simple but effective neural networks and an ECG feature selection strategy to reduce computing complexity while maintaining recognition performance. Based on the platform, we evaluated and discussed the performance, response time, and requirements for model retraining in the scenario of AF detection from ECG recordings. The proposed lightweight solution was validated with two public datasets and an ECG data stream simulation on an ATmega2560 processor, proving the feasibility of analysis and training on the edge.}, } @article {pmid35510052, year = {2022}, author = {Wang, L}, title = {Internet of Things Device Identification Algorithm considering User Privacy.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {6173185}, pmid = {35510052}, issn = {1687-5273}, mesh = {Algorithms ; *Blockchain ; Computer Security ; Humans ; Internet ; *Internet of Things ; Privacy ; }, abstract = {The Internet of Things has become the third wave of the information industry, following cloud computing, big data, and Internet technologies. Among the many identification technologies used in the Internet of Things, radiofrequency identification technology is undoubtedly one of the most popular methods today. It is replacing the traditional contact IC card and becoming a new trend in smart cards. At the same time, a large amount of data is generated in the IoT environment. Much of these data involve user privacy, and users do not have good control over them. Collecting and utilizing these data while protecting user privacy has become an important problem to be solved urgently. With the implementation of the strategy of rejuvenating the country through science and education, major colleges and universities are developing rapidly through enrollment expansion, which also brings inconvenience to campus security management.
Although the traditional campus all-in-one card system can verify the identities of people entering and leaving, it does not reasonably integrate and utilize this information, resulting in wasted information resources and, to a certain extent, user privacy leakage. To solve the above problems, a new system was developed to integrate resources and identify users. To protect the private data of Internet of Things users, a specific solution using blockchain technology is proposed; for the identity authentication of Internet of Things users, authentication based on the blockchain's public key addresses is used on-chain, and group signatures are used off-chain. This identity authentication method resolves the contradiction between anonymity and traceability in blockchain application scenarios. The simulation results show that the system not only considers user privacy but also has extremely important practical significance for the promotion of Internet of Things and RF applications.}, } @article {pmid35510050, year = {2022}, author = {Mittal, S and Bansal, A and Gupta, D and Juneja, S and Turabieh, H and Elarabawy, MM and Sharma, A and Bitsue, ZK}, title = {Using Identity-Based Cryptography as a Foundation for an Effective and Secure Cloud Model for E-Health.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {7016554}, pmid = {35510050}, issn = {1687-5273}, mesh = {Algorithms ; Cloud Computing ; *Computer Security ; Humans ; Research Design ; *Telemedicine ; }, abstract = {Nowadays, one of the most popular applications is cloud computing for storing data and information through the World Wide Web. Since cloud computing became available, its user base has grown rapidly. Cloud computing enables users to obtain a better and more effective application at a lower cost and in a more satisfactory way. Health services data must therefore be kept as safe and secure as possible, because the release of such data could have serious consequences for patients. A framework for security and privacy must be employed to store and manage extremely sensitive data. To date, patients' confidential health records have been encrypted and saved in the cloud as ciphertext. Ensuring privacy and security in a cloud computing environment is a major issue. The medical system has been designed to standardize record access and enable effective use by medical practitioners as required. In this paper, we propose a novel algorithm, along with implementation details, as an effective and secure E-health cloud model using identity-based cryptography. The comparison of the proposed and existing techniques has been carried out in terms of the time taken for encryption and decryption, energy, and power. Decryption time has been decreased by up to 50% with the proposed method of cryptography.
Since decryption takes less time, less power is consumed by the cryptographic operations.}, } @article {pmid35503992, year = {2022}, author = {Pinter, N and Glätzer, D and Fahrner, M and Fröhlich, K and Johnson, J and Grüning, BA and Warscheid, B and Drepper, F and Schilling, O and Föll, MC}, title = {MaxQuant and MSstats in Galaxy Enable Reproducible Cloud-Based Analysis of Quantitative Proteomics Experiments for Everyone.}, journal = {Journal of proteome research}, volume = {21}, number = {6}, pages = {1558-1565}, doi = {10.1021/acs.jproteome.2c00051}, pmid = {35503992}, issn = {1535-3907}, mesh = {Cloud Computing ; Mass Spectrometry/methods ; Proteins/analysis ; *Proteomics/methods ; Reproducibility of Results ; *Software ; }, abstract = {Quantitative mass spectrometry-based proteomics has become a high-throughput technology for the identification and quantification of thousands of proteins in complex biological samples. Two frequently used tools, MaxQuant and MSstats, allow for the analysis of raw data and finding proteins with differential abundance between conditions of interest. To enable accessible and reproducible quantitative proteomics analyses in a cloud environment, we have integrated MaxQuant (including TMTpro 16/18plex), Proteomics Quality Control (PTXQC), MSstats, and MSstatsTMT into the open-source Galaxy framework. This enables the web-based analysis of label-free and isobaric labeling proteomics experiments via Galaxy's graphical user interface on public clouds. MaxQuant and MSstats in Galaxy can be applied in conjunction with thousands of existing Galaxy tools and integrated into standardized, sharable workflows. Galaxy tracks all metadata and intermediate results in analysis histories, which can be shared privately for collaborations or publicly, allowing full reproducibility and transparency of published analyses. To further increase accessibility, we provide detailed hands-on training materials. The integration of MaxQuant and MSstats into the Galaxy framework enables their usage in a reproducible way on accessible large computational infrastructures, hence realizing the foundation for high-throughput proteomics data science for everyone.}, } @article {pmid35501696, year = {2022}, author = {Hadish, JA and Biggs, TD and Shealy, BT and Bender, MR and McKnight, CB and Wytko, C and Smith, MC and Feltus, FA and Honaas, L and Ficklin, SP}, title = {GEMmaker: process massive RNA-seq datasets on heterogeneous computational infrastructure.}, journal = {BMC bioinformatics}, volume = {23}, number = {1}, pages = {156}, pmid = {35501696}, issn = {1471-2105}, support = {1659300//National Science Foundation/ ; AP-19-103//Washington Tree Fruit Research Commission/ ; Emerging Research Initiatives//Washington State University/ ; Livestock Health//Washington State University/ ; Food Security program award//Washington State University/ ; 1014919//U.S. Department of Agriculture/ ; WNP00009//McIntyre Stennis/ ; }, mesh = {*High-Throughput Nucleotide Sequencing/methods ; RNA-Seq ; Reproducibility of Results ; Sequence Analysis, RNA/methods ; *Software ; }, abstract = {BACKGROUND: Quantification of gene expression from RNA-seq data is a prerequisite for transcriptome analysis such as differential gene expression analysis and gene co-expression network construction. Individual RNA-seq experiments are growing larger, and combining multiple experiments from sequence repositories can result in datasets with thousands of samples.
Processing hundreds to thousands of RNA-seq datasets presents challenges related to data management, access to sufficient computational resources, navigation of high-performance computing (HPC) systems, installation of required software dependencies, and reproducibility. Processing of larger and deeper RNA-seq experiments will become more common as sequencing technology matures.

RESULTS: GEMmaker is an nf-core-compliant Nextflow workflow that quantifies gene expression from small to massive RNA-seq datasets. GEMmaker ensures results are highly reproducible through the use of versioned containerized software that can be executed on a single workstation, an institutional compute cluster, a Kubernetes platform, or the cloud. GEMmaker supports popular alignment and quantification tools, providing results in raw and normalized formats. GEMmaker is unique in that it can scale to process thousands of locally or remotely stored samples without exceeding available data storage.
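GEMmaker's distinguishing claim, scaling to thousands of samples without exceeding available data storage, amounts to staging raw samples in bounded batches and discarding them once quantified. GEMmaker itself is a Nextflow workflow; the Python sketch below only illustrates the batching idea, with hypothetical fetch and quantify callables standing in for the download and quantification steps:

    import shutil
    import tempfile
    from pathlib import Path

    def process_in_batches(sample_ids, batch_size, fetch, quantify):
        """Keep at most one batch of raw samples on disk at any time.

        fetch(sample_id, dest_dir) stages one raw sample locally, and
        quantify(path) reduces it to a small result; both are hypothetical
        stand-ins for a workflow's download and alignment/quantification steps.
        """
        results = {}
        for start in range(0, len(sample_ids), batch_size):
            workdir = Path(tempfile.mkdtemp(prefix="batch_"))
            try:
                for sid in sample_ids[start:start + batch_size]:
                    raw_path = fetch(sid, workdir)     # stage raw input locally
                    results[sid] = quantify(raw_path)  # keep only the small output
            finally:
                shutil.rmtree(workdir)                 # raw data never accumulates
        return results

    # Placeholder usage with no real download or alignment performed:
    demo = process_in_batches(
        [f"SRR{i}" for i in range(6)], batch_size=2,
        fetch=lambda sid, d: d / f"{sid}.fastq",
        quantify=lambda p: {"counts": 0},
    )

Because only the compact quantification results are retained, peak disk usage is bounded by the batch size rather than by the total number of samples.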

CONCLUSIONS: Workflows that quantify gene expression are not new, and many already address issues of portability, reusability, and scale in terms of access to CPUs. GEMmaker provides these benefits and adds the ability to scale despite low data storage infrastructure. This allows users to process hundreds to thousands of RNA-seq samples even when data storage resources are limited. GEMmaker is freely available and fully documented with step-by-step setup and execution instructions.}, } @article {pmid35498196, year = {2022}, author = {Almuzaini, KK and Sinhal, AK and Ranjan, R and Goel, V and Shrivastava, R and Halifa, A}, title = {Key Aggregation Cryptosystem and Double Encryption Method for Cloud-Based Intelligent Machine Learning Techniques-Based Health Monitoring Systems.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {3767912}, pmid = {35498196}, issn = {1687-5273}, mesh = {*Cloud Computing ; Commerce ; Humans ; Intelligence ; Machine Learning ; *Research Design ; }, abstract = {Cloud technology is a business strategy that aims to provide the necessary material to customers depending on their needs. Individuals and cloud businesses alike have embraced the cloud storage service, which has become the most widely used service. The industries outsource their data to cloud storage space to relieve themselves of the load of dealing with redundant data contents. This must be protected to prevent the theft of personal belongings, and privacy must be improved as well. Different research projects have been suggested to ensure the safe management of the information included within the data content. The security of current research projects, on the contrary, still needs improvement. As a result, this method has been suggested to address the security concerns associated with cloud computing. The primary goal of this study effort is to offer a safe environment for cloud users while also increasing the profit of cloud resource providers by managing and securely delivering data contents to the cloud users. The bulk of sectors, including business, finance, military, and healthcare industry, do not store data in cloud-based storage systems. This technique is used to attract these kinds of customers. Increasing public acceptance, medical researchers are drawn to cloud computing because it allows them to store their study material in a centralized location and distribute and access it in a more flexible manner. They were collected from numerous individuals who were being evaluated for medical care at the time. Scalable and enhanced key aggregate cryptosystem is a protected data protection method that provides highly effective security in the healthcare industry. When parties interested in a dispute disagree on the outflow of sensitive information, this technique manages the disputes and ensures the data security deployment of a cloud-based intelligent health monitoring system for the parties involved. The encrypted data structure of medical and healthcare prescriptions is recorded as they move through the hands of patients and healthcare facilities, according to the technique recommended. The double encryption approach is used in order to raise the overall degree of security. An encryption class is created by referring to the Ciphertext ID during the encryption procedure. The keyholder is a master secret key that facilitates in the recovery of the secret keys of various monsters and creatures by acting as a conduit between them. 
It is transferred and stored as a single aggregate for the benefit of the patient or customer in order to make decryption more convenient and efficient. A safe connection between cloud-based intelligent health monitoring systems and healthcare organizations and their patients may be established via the use of a key aggregation cryptosystem and a double encryption approach, according to the researchers. Because of this, when compared to earlier techniques, the findings reveal that the research methodology provides high levels of security in terms of confidentiality and integrity, in addition to excellent scalability.}, } @article {pmid35495546, year = {2021}, author = {Vekaria, K and Calyam, P and Sivarathri, SS and Wang, S and Zhang, Y and Pandey, A and Chen, C and Xu, D and Joshi, T and Nair, S}, title = {Recommender-as-a-Service with Chatbot Guided Domain-science Knowledge Discovery in a Science Gateway.}, journal = {Concurrency and computation : practice & experience}, volume = {33}, number = {19}, pages = {}, pmid = {35495546}, issn = {1532-0626}, support = {R01 MH122023/MH/NIMH NIH HHS/United States ; }, abstract = {Scientists in disciplines such as neuroscience and bioinformatics are increasingly relying on science gateways for experimentation on voluminous data, as well as analysis and visualization in multiple perspectives. Though current science gateways provide easy access to computing resources, datasets and tools specific to the disciplines, scientists often use slow and tedious manual efforts to perform knowledge discovery to accomplish their research/education tasks. Recommender systems can provide expert guidance and can help them to navigate and discover relevant publications, tools, data sets, or even automate cloud resource configurations suitable for a given scientific task. To realize the potential of integration of recommenders in science gateways in order to spur research productivity, we present a novel "OnTimeRecommend" recommender system. The OnTimeRecommend comprises of several integrated recommender modules implemented as microservices that can be augmented to a science gateway in the form of a recommender-as-a-service. The guidance for use of the recommender modules in a science gateway is aided by a chatbot plug-in viz., Vidura Advisor. To validate our OnTimeRecommend, we integrate and show benefits for both novice and expert users in domain-specific knowledge discovery within two exemplar science gateways, one in neuroscience (CyNeuro) and the other in bioinformatics (KBCommons).}, } @article {pmid35494839, year = {2022}, author = {Wang, B and Cheng, J and Cao, J and Wang, C and Huang, W}, title = {Integer particle swarm optimization based task scheduling for device-edge-cloud cooperative computing to improve SLA satisfaction.}, journal = {PeerJ. Computer science}, volume = {8}, number = {}, pages = {e893}, pmid = {35494839}, issn = {2376-5992}, abstract = {Task scheduling helps to improve the resource efficiency and the user satisfaction for Device-Edge-Cloud Cooperative Computing (DE3C), by properly mapping requested tasks to hybrid device-edge-cloud resources. In this paper, we focused on the task scheduling problem for optimizing the Service-Level Agreement (SLA) satisfaction and the resource efficiency in DE3C environments. Existing works only focused on one or two of three sub-problems (offloading decision, task assignment and task ordering), leading to a sub-optimal solution. 
To address this issue, we first formulated the problem as a binary nonlinear program and proposed an integer particle swarm optimization method (IPSO) to solve the problem in a reasonable time. With integer coding of task assignment to computing cores, our proposed method exploited IPSO to jointly solve the problems of offloading decision and task assignment, and integrated an earliest-deadline-first scheme into the IPSO to solve the task ordering problem for each core. Extensive experimental results showed that our method achieved up to 953% and 964% better performance than several classical and state-of-the-art task scheduling methods in SLA satisfaction and resource efficiency, respectively.}, } @article {pmid35492501, year = {2021}, author = {Sherbert, K and Cerasoli, F and Buongiorno Nardelli, M}, title = {A systematic variational approach to band theory in a quantum computer.}, journal = {RSC advances}, volume = {11}, number = {62}, pages = {39438-39449}, pmid = {35492501}, issn = {2046-2069}, abstract = {Quantum computers promise to revolutionize our ability to simulate molecules, and cloud-based hardware is becoming increasingly accessible to a wide body of researchers. Algorithms such as Quantum Phase Estimation and the Variational Quantum Eigensolver are being actively developed and demonstrated in small systems. However, extremely limited qubit count and low fidelity seriously limit useful applications, especially in the crystalline phase, where compact orbital bases are difficult to develop. To address this difficulty, we present a hybrid quantum-classical algorithm to solve the band structure of any periodic system described by an adequate tight-binding model. We showcase our algorithm by computing the band structure of a simple-cubic crystal with one s and three p orbitals per site (a simple model for polonium) using simulators with increasingly realistic levels of noise and culminating with calculations on IBM quantum computers. Our results show that the algorithm is reliable in a low-noise device, functional with low precision on present-day noisy quantum computers, and displays a complexity that scales as Ω(M³) with the number M of tight-binding orbitals per unit-cell, similarly to its classical counterparts. Our simulations offer a new insight into the "quantum" mindset and demonstrate how the algorithms under active development today can be optimized in special cases, such as band structure calculations.}, } @article {pmid35492053, year = {2021}, author = {Read, RL and Clarke, L and Mulligan, G}, title = {VentMon: An open source inline ventilator tester and monitor.}, journal = {HardwareX}, volume = {9}, number = {}, pages = {e00195}, pmid = {35492053}, issn = {2468-0672}, abstract = {Humanitarian engineers responded to the pandemic ventilator shortage of March 2020 by beginning over 100 open source ventilator projects [Robert L. Read et al. COVID-19 Vent List. Oct. 2020. url: https://docs.google.com/spreadsheets/d/1inYw5H4RiL0AC_J9vPWzJxXCdlkMLPBRdPgEVKF8DZw/edit#gid=0, Joshua M. Pearce. A review of open source ventilators for COVID-19 and future pandemics. In: F1000Research 9 (2020).]. By ventilator, we mean both an invasive ventilator (requiring intubation of the patient) and a non-invasive ventilator (generally supporting spontaneous breathing). Inexpensive ventilator test equipment can facilitate projects forced to be geographically distributed by lockdowns.
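In the DE3C scheduling entry above (Wang et al.), IPSO decides which core each task runs on, and the tasks assigned to one core are then ordered by an earliest-deadline-first (EDF) rule. A minimal sketch of that per-core ordering step, with illustrative task fields rather than the paper's actual data structures:

    def edf_order(tasks):
        """Order one core's tasks by earliest absolute deadline (EDF)."""
        return sorted(tasks, key=lambda t: t["deadline"])

    def count_sla_hits(tasks):
        """Count tasks that finish by their deadline when run in EDF order."""
        clock, met = 0.0, 0
        for t in edf_order(tasks):
            clock += t["runtime"]
            met += clock <= t["deadline"]
        return met

    core_tasks = [{"runtime": 2, "deadline": 9},
                  {"runtime": 4, "deadline": 5},
                  {"runtime": 1, "deadline": 3}]
    print(count_sla_hits(core_tasks), "of", len(core_tasks), "tasks meet their deadline")

EDF is a classical choice for single-core deadline scheduling, which makes it a natural inner loop once the swarm search has fixed the assignment of tasks to cores.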
The VentMon is a modular, open source, IoT-enabled tester that plugs into a standard 22 mm airway between a ventilator and a physical test lung to test any ventilator. The VentMon measures flow, pressure, fractional oxygen, humidity, and temperature. Data is stored and graphed in a data lake accessible to all development team members, and, eventually, clinicians. The open source design of the VentMon, its firmware, and cloud-based software may allow it to be used as a component of modular ventilators to provide a clinical readout. The software system surrounding VentMon has been designed to be as modular and composable as possible. By combining new, openly published standards for data with composable and modifiable hardware, the VentMon forms the beginning of an open system or ecosystem of ventilation devices and data. Thanks to grants, 20 VentMons have been given away free of charge to pandemic response teams building open source ventilators.}, } @article {pmid35491772, year = {2022}, author = {Crichton, DJ and Cinquini, L and Kincaid, H and Mahabal, A and Altinok, A and Anton, K and Colbert, M and Kelly, S and Liu, D and Patriotis, C and Lombeyda, S and Srivastava, S}, title = {From space to biomedicine: Enabling biomarker data science in the cloud.}, journal = {Cancer biomarkers : section A of Disease markers}, volume = {33}, number = {4}, pages = {479-488}, doi = {10.3233/CBM-210350}, pmid = {35491772}, issn = {1875-8592}, mesh = {*Artificial Intelligence ; Biomarkers, Tumor ; *Data Science ; Ecosystem ; Humans ; Software ; }, abstract = {NASA's Jet Propulsion Laboratory (JPL) is advancing research capabilities for data science with two of the National Cancer Institute's major research programs, the Early Detection Research Network (EDRN) and the Molecular and Cellular Characterization of Screen-Detected Lesions (MCL), by enabling data-driven discovery for cancer biomarker research. The research team pioneered a national data science ecosystem for cancer biomarker research to capture, process, manage, share, and analyze data across multiple research centers. By collaborating on software and data-driven methods developed for space and earth science research, the biomarker research community is heavily leveraging similar capabilities to support the data and computational demands to analyze research data. This includes linking diverse data from clinical phenotypes to imaging to genomics. The data science infrastructure captures and links data from over 1600 annotations of cancer biomarkers to terabytes of analysis results on the cloud in a biomarker data commons known as "LabCAS". As the data increases in size, it is critical that automated approaches be developed to "plug" laboratories and instruments into a data science infrastructure to systematically capture and analyze data directly. This includes the application of artificial intelligence and machine learning to automate annotation and scale science analysis.}, } @article {pmid35480156, year = {2022}, author = {Ren, H and Dan, W}, title = {Analysis of Reasonable Respiratory Efficiency in Tennis Competition and Training Environment Based on Cloud Computing.}, journal = {Journal of healthcare engineering}, volume = {2022}, number = {}, pages = {4289667}, pmid = {35480156}, issn = {2040-2309}, mesh = {Cloud Computing ; Humans ; Oxygen ; Physical Fitness ; Respiratory Rate ; *Tennis/physiology ; }, abstract = {Competitive tennis is developing in the direction of quantification.
Breathing, as a basic metabolic function of the human body, plays a vital role in tennis, where players must make full use of every positive factor in order to attack actively and push the limits of body and mind. This paper studies the important role that breathing plays in the rationality and explosiveness of movement and in psychological and physiological regulation during competition. The characteristics of tennis events determine the importance of scientific and rational breathing. Reasonable breathing during exercise is conducive to maintaining the basic stability of the internal environment, improving the training effect, and giving full play to the functional ability of the human body, so as to achieve excellent sports results. First, respiratory resistance should be reduced. Second, there are two methods to improve alveolar ventilation efficiency and pulmonary ventilation: increasing respiratory rate and increasing respiratory depth. When the inhalation volume is constant, the alveolar gas renewal rate depends on the functional residual volume in the alveolar cavity at the end of expiration or before inhalation. The less functional residual air remains, the more fresh air is inhaled and the higher the oxygen partial pressure in alveolar gas. An effective way to reduce the functional residual volume in the alveolar cavity is to exhale as deeply as possible, so as to ensure that more oxygen enters the body. Reasonable breathing methods can not only accelerate the excitation of the body, increase movement strength, reduce fatigue, and promote recovery but also play a vital role in the rational allocation of physical fitness and the improvement of sports performance. The purpose of this study is to provide a theoretical basis for scientific tennis training by analyzing the characteristics of tennis events, the forms of breathing in tennis, and the efficiency of reasonable breathing in tennis.}, } @article {pmid35475238, year = {2021}, author = {Mano, T and Murata, K and Kon, K and Shimizu, C and Ono, H and Shi, S and Yamada, RG and Miyamichi, K and Susaki, EA and Touhara, K and Ueda, HR}, title = {CUBIC-Cloud provides an integrative computational framework toward community-driven whole-mouse-brain mapping.}, journal = {Cell reports methods}, volume = {1}, number = {2}, pages = {100038}, pmid = {35475238}, issn = {2667-2375}, mesh = {Mice ; Animals ; *Brain/diagnostic imaging ; Brain Mapping ; *Alzheimer Disease/diagnostic imaging ; Neurons ; }, abstract = {Recent advancements in tissue clearing technologies have offered unparalleled opportunities for researchers to explore the whole mouse brain at cellular resolution. With the expansion of this experimental technique, however, a scalable and easy-to-use computational tool is in demand to effectively analyze and integrate whole-brain mapping datasets. To that end, here we present CUBIC-Cloud, a cloud-based framework to quantify, visualize, and integrate mouse brain data. CUBIC-Cloud is a fully automated system where users can upload their whole-brain data, run analyses, and publish the results. We demonstrate the generality of CUBIC-Cloud by a variety of applications. First, we investigated the brain-wide distribution of five cell types. Second, we quantified Aβ plaque deposition in Alzheimer's disease model mouse brains. Third, we reconstructed a neuronal activity profile under LPS-induced inflammation by c-Fos immunostaining. Last, we show brain-wide connectivity mapping by pseudotyped rabies virus.
Together, CUBIC-Cloud provides an integrative platform to advance scalable and collaborative whole-brain mapping.}, } @article {pmid35464821, year = {2023}, author = {Hassan, N and Aazam, M and Tahir, M and Yau, KA}, title = {Floating Fog: extending fog computing to vast waters for aerial users.}, journal = {Cluster computing}, volume = {26}, number = {1}, pages = {181-195}, pmid = {35464821}, issn = {1386-7857}, abstract = {There are thousands of flights carrying millions of passengers each day, with three or more Internet-connected devices per passenger on average. Usually, onboard devices remain idle for most of the journey (which can be several hours long); therefore, we can tap their underutilized potential. Although these devices are generally becoming more and more resourceful, for complex services (such as those related to machine learning, augmented/virtual reality, smart healthcare, and so on) those devices do not suffice standalone. This makes a case for multi-device resource aggregation, such as through a femto-cloud. As our first contribution, we present the utility of the femto-cloud for aerial users. But for that sake, reliable and faster Internet is required (to access online services or cloud resources), which is currently not the case with satellite-based Internet. That is the second challenge we try to address in our paper, by presenting an adaptive beamforming-based solution for aerial Internet provisioning. However, on average, most of the flight path is above waters. Given that, we propose that beamforming transceivers can be docked on stationary ships deployed in the vast waters (such as the ocean). Nevertheless, certain services would be delay-sensitive, and accessing their on-ground servers or cloud may not be feasible (in terms of delay). Similarly, certain complex services may require resources in addition to the flight-local femto-cloud. That is the third challenge we try to tackle in this paper, by proposing that traditional fog computing (which is a cloud-like but localized pool of resources) can also be extended to the waters on the ships harboring beamforming transceivers. We name it Floating Fog. In addition to that, Floating Fog will enable several new services such as a live black-box. We also present a cost and bandwidth analysis to highlight the potential of Floating Fog. Lastly, we identify some challenges to tackle for the successful deployment of Floating Fog.}, } @article {pmid35463737, year = {2022}, author = {Jyotsna, and Nand, P}, title = {Novel DLSNNC and SBS based framework for improving QoS in healthcare-IoT applications.}, journal = {International journal of information technology : an official journal of Bharati Vidyapeeth's Institute of Computer Applications and Management}, volume = {14}, number = {4}, pages = {2093-2103}, pmid = {35463737}, issn = {2511-2112}, abstract = {The health care system is intended to enhance one's health and, as a result, one's quality of life. In order to fulfil its social commitment, health care must focus on producing social profit to sustain itself. Also, due to the ever-increasing demand on the healthcare sector, there is a drastic rise in the amount of patient data that is produced and must be stored for long durations for clinical reference. The risk of patient data being lost due to a data centre failure can be minimized by including a fog layer in the cloud computing architecture. Furthermore, the burden of storing such data falls on the cloud.
In order to increase service quality, we introduce fog computing based on deep learning sigmoid-based neural network clustering (DLSNNC) and score-based scheduling (SBS). Fog computing begins by collecting and storing healthcare data on the cloud layer, using data collected through sensors. Deep learning sigmoid based neural network clustering and score based Scheduling approaches are used to determine entropy for each fog node in the fog layer. Sensors collect data and send it to the fog layer, while the cloud computing tier is responsible for monitoring the healthcare system. The exploratory findings show promising results in terms of end-to-end latency and network utilization. Also, the proposed system outperforms the existing techniques in terms of average delay.}, } @article {pmid35463290, year = {2022}, author = {He, J}, title = {Cloud Computing Load Balancing Mechanism Taking into Account Load Balancing Ant Colony Optimization Algorithm.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {3120883}, pmid = {35463290}, issn = {1687-5273}, mesh = {*Algorithms ; *Cloud Computing ; Computers ; Computing Methodologies ; }, abstract = {The networking scale and traffic have exploded. At the same time, the rapid development of virtualization and cloud computing technologies not only poses a considerable challenge to the endurance of the network, but also causes more and more problems to the traditional network architecture with IP as the core. Cloud computing is a supercomputing model based on the Internet. With the rapid growth of network access and data traffic, the processing power and computing intensity will also increase, and a single server cannot afford the increase in business. In order to reduce network pressure and improve computing efficiency, load balancing for network computing is particularly important. This paper uses ant colony algorithm to design cloud computing load balance. The ant colony algorithm runs in the controller. According to the real-time network load situation provided by the controller, it calculates the link with the smallest load and provides a dynamic data stream forwarding strategy. The result of the experiments shows that the load-balanced ACO optimized technique can significantly provide an improved computational response. In the ACO algorithm, the average response time is about 30% lower than that in other algorithms. This shows that the use of the ant colony algorithm achieves a good optimization effect.}, } @article {pmid35463282, year = {2022}, author = {Yadav, S and Tiwari, N}, title = {An Efficient and Secure Data Sharing Method Using Asymmetric Pairing with Shorter Ciphertext to Enable Rapid Learning in Healthcare.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {4788031}, pmid = {35463282}, issn = {1687-5273}, mesh = {*COVID-19 ; Cloud Computing ; *Computer Security ; Delivery of Health Care ; Humans ; Information Dissemination ; }, abstract = {The recent advent of cloud computing provides a flexible way to effectively share data among multiple users. Cloud computing and cryptographic primitives are changing the way of healthcare unprecedentedly by providing real-time data sharing cost-effectively. Sharing various data items from different users to multiple sets of legitimate subscribers in the cloud environment is a challenging issue. 
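The load-balancing entry above (He) lets ant-colony pheromone levels steer traffic toward lightly loaded links. A generic sketch of that mechanism, not the paper's algorithm: choices are drawn with probability proportional to pheromone^alpha times (1/load)^beta, pheromone evaporates everywhere, and the chosen low-load option is reinforced:

    import numpy as np

    rng = np.random.default_rng(1)
    load = np.array([0.9, 0.4, 0.7, 0.2])   # observed load per candidate link
    pheromone = np.ones_like(load)
    alpha, beta, rho = 1.0, 2.0, 0.1        # pheromone weight, heuristic weight, evaporation

    for _ in range(200):                    # one ant per iteration
        desirability = pheromone ** alpha * (1.0 / load) ** beta
        p = desirability / desirability.sum()
        chosen = rng.choice(len(load), p=p)
        pheromone *= 1.0 - rho                   # evaporation on every link
        pheromone[chosen] += 1.0 / load[chosen]  # reinforce lightly loaded choices
    print("routing preference:", (pheromone / pheromone.sum()).round(2))

Over the iterations, preference concentrates on the least-loaded links, while evaporation keeps the colony able to adapt when loads change.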
The online electronic healthcare system requires multiple data items to be shared by different users for various purposes. In the present scenario, COVID-19 data is sensitive and must be encrypted to ensure data privacy. Secure sharing of such information is crucial. The standard broadcast encryption system is inefficient for this purpose. Multichannel broadcast encryption is a mechanism that enables secure sharing of different messages to different sets of users efficiently. We propose an efficient and secure data sharing method with shorter ciphertext in the public key setting using asymmetric (Type-III) pairings. The Type-III setting is the most efficient form among all pairing types regarding operations required and security. The semantic security of this method is proven under the decisional BDHE complexity assumption without the random oracle model.}, } @article {pmid35463252, year = {2022}, author = {You, L and Sun, H}, title = {Research and Design of Docker Technology Based Authority Management System.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {5325694}, pmid = {35463252}, issn = {1687-5273}, mesh = {*Cloud Computing ; Databases, Factual ; Humans ; *Software ; Technology ; }, abstract = {With the development of mobile Internet technology and the continuous popularization of the network, various kinds of network software are constantly emerging and people are becoming more and more dependent on them; role authority management is therefore of great importance for software security, control of the management process, and usability. In terms of system implementation, virtual machine technology is often faced with problems such as high virtualization overhead, poor scalability, and long deployment time in spite of its good isolation effect. Container technology represented by Docker can solve these problems well, making it possible to quickly build, deploy, operate, maintain, and scale services. Based on Docker technology, this research compares and chooses from various authority control models and finally decides to take the role authority management model as the infrastructure. It designs the role authority control model based on cloud computing and Docker technology in combination with the Task Controller Function, the Project Controller Function, and the User Controller Function, and realizes this model by adopting the MongoDB database combined with HTML/CSS/JavaScript and the Bootstrap framework. Testing shows that the Docker-based role authority management system performs satisfactorily, consistent with expected outputs, and is robust enough to meet the requirements of different objects and subjects.}, } @article {pmid35463235, year = {2022}, author = {Gong, R and Ge, N and Li, J}, title = {Real-Time Detection of Body Nutrition in Sports Training Based on Cloud Computing and Somatosensory Network.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {9911905}, pmid = {35463235}, issn = {1687-5273}, mesh = {Body Composition ; *Cloud Computing ; Glucose/analysis ; Humans ; *Sports ; Sweat/chemistry ; }, abstract = {With the progress of society and the improvement of living standards, sports training has gradually become an area of increasing concern for society and individuals.
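The Docker-based system above (You and Sun) rests on a role-permission model in which users hold roles and roles grant operations on tasks, projects, and users. Reduced to its core, such a check is a table lookup; the roles and permission strings below are hypothetical, not the system's actual schema:

    ROLE_PERMISSIONS = {
        "admin":   {"task:create", "task:delete", "project:manage", "user:manage"},
        "manager": {"task:create", "task:delete", "project:manage"},
        "member":  {"task:create"},
    }

    def authorize(user_roles, permission):
        """Grant access if any role held by the user carries the permission."""
        return any(permission in ROLE_PERMISSIONS.get(r, set()) for r in user_roles)

    print(authorize({"member"}, "task:create"))     # True
    print(authorize({"member"}, "project:manage"))  # False

Keeping the policy in a single table is a common way to let separate controller functions (for tasks, projects, and users) share one authorization path.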
To more comprehensively grasp the physical function, body shape, and physical fitness of athletes, many researchers have conducted extensive research on the real-time detection of human body nutrition. This study is mainly supported by cloud computing and somatosensory network technology, and the real-time detection of human body composition in sports training is the main research object. In the experiment, two methods of human body composition detection were tested: the BIA method and the body composition analysis method based on the electrochemical sensor of body sweat. It designed a human nutrient composition detection system based on the BIA method. The error rate of the system is relatively small, which is basically maintained at about 2%. It uses a body surface sweat electrochemical sensor to detect changes in glucose concentration during human exercise. After exercising for a period of time, the test subject's sweat glucose concentration remained around 0.5 mM.}, } @article {pmid35459005, year = {2022}, author = {Franchi, F and Marotta, A and Rinaldi, C and Graziosi, F and Fratocchi, L and Parisse, M}, title = {What Can 5G Do for Public Safety? Structural Health Monitoring and Earthquake Early Warning Scenarios.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {8}, pages = {}, pmid = {35459005}, issn = {1424-8220}, support = {135//Governo Italiano/ ; }, mesh = {Cell Phone ; *Earthquakes ; Reproducibility of Results ; }, abstract = {The 5th generation of mobile networks has come to the market bringing the promise of disruptive performances as low latency, availability and reliability, imposing the development of the so-called "killer applications". This contribution presents a 5G use case in the context of Structural Health Monitoring which guarantees an unprecedented level of reliability when exploited for public safety purposes as Earthquake Early Warning. The interest on this topic is at first justified through a deep market analysis, and subsequently declined in terms of public safety benefits. A specific sensor board, guaranteeing real-time processing and 5G connectivity, is presented as the foundation on which the architecture of the network is designed and developed. Advantages of 5G-enabled urban safety are then discussed and proven in the experimentation results, showing that the proposed architecture guarantees lower latency delays and overcome the impairments of cloud solutions especially in terms of delays variability.}, } @article {pmid35458932, year = {2022}, author = {Jeon, S and Kim, MS}, title = {End-to-End Lip-Reading Open Cloud-Based Speech Architecture.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {8}, pages = {}, pmid = {35458932}, issn = {1424-8220}, support = {NRF-2018X1A3A1069795//National Research Foundation of Korea/ ; }, mesh = {*Artificial Intelligence ; Cloud Computing ; Neural Networks, Computer ; *Speech ; Speech Recognition Software ; }, abstract = {Deep learning technology has encouraged research on noise-robust automatic speech recognition (ASR). The combination of cloud computing technologies and artificial intelligence has significantly improved the performance of open cloud-based speech recognition application programming interfaces (OCSR APIs). Noise-robust ASRs for application in different environments are being developed. This study proposes noise-robust OCSR APIs based on an end-to-end lip-reading architecture for practical applications in various environments. 
Several OCSR APIs, including Google, Microsoft, Amazon, and Naver, were evaluated using the Google Voice Command Dataset v2 to obtain the optimum performance. Based on performance, the Microsoft API was integrated with Google's trained word2vec model to enhance the keywords with more complete semantic information. The extracted word vector was integrated with the proposed lip-reading architecture for audio-visual speech recognition. Three forms of convolutional neural networks (3D CNN, 3D dense connection CNN, and multilayer 3D CNN) were used in the proposed lip-reading architecture. Vectors extracted from API and vision were classified after concatenation. The proposed architecture enhanced the OCSR API average accuracy rate by 14.42% using standard ASR evaluation measures along with the signal-to-noise ratio. The proposed model exhibits improved performance in various noise settings, increasing the dependability of OCSR APIs for practical applications.}, } @article {pmid35456492, year = {2022}, author = {Lim, HG and Hsiao, SH and Fann, YC and Lee, YG}, title = {Robust Mutation Profiling of SARS-CoV-2 Variants from Multiple Raw Illumina Sequencing Data with Cloud Workflow.}, journal = {Genes}, volume = {13}, number = {4}, pages = {}, pmid = {35456492}, issn = {2073-4425}, support = {HHSN261201400008C/NH/NIH HHS/United States ; }, mesh = {*COVID-19/genetics ; High-Throughput Nucleotide Sequencing ; Humans ; Mutation ; *SARS-CoV-2/genetics ; Spike Glycoprotein, Coronavirus/genetics ; Workflow ; }, abstract = {Several variants of the novel severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2) are emerging all over the world. Variant surveillance from genome sequencing has become crucial to determine if mutations in these variants are rendering the virus more infectious, potent, or resistant to existing vaccines and therapeutics. Meanwhile, analyzing many raw sequencing data repeatedly with currently available code-based bioinformatics tools is tremendously challenging to be implemented in this unprecedented pandemic time due to the fact of limited experts and computational resources. Therefore, in order to hasten variant surveillance efforts, we developed an installation-free cloud workflow for robust mutation profiling of SARS-CoV-2 variants from multiple Illumina sequencing data. Herein, 55 raw sequencing data representing four early SARS-CoV-2 variants of concern (Alpha, Beta, Gamma, and Delta) from an open-access database were used to test our workflow performance. As a result, our workflow could automatically identify mutated sites of the variants along with reliable annotation of the protein-coding genes at cost-effective and timely manner for all by harnessing parallel cloud computing in one execution under resource-limitation settings. 
In addition, our workflow can also generate a consensus genome sequence which can be shared with others in public data repositories to support global variant surveillance efforts.}, } @article {pmid35440942, year = {2022}, author = {Meng, S and Zhang, X}, title = {The Use of Internet of Things and Cloud Computing Technology in the Performance Appraisal Management of Innovation Capability of University Scientific Research Team.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {9423718}, pmid = {35440942}, issn = {1687-5273}, mesh = {*Cloud Computing ; Humans ; *Internet of Things ; Technology ; Universities ; }, abstract = {This study aims to speed up the progress of scientific research projects in colleges and universities, continuously improve the innovation ability of scientific research teams in colleges and universities, and optimize the current management methods of performance appraisal of college innovation ability. Firstly, the needs of the innovation performance evaluation system are analyzed, and the corresponding innovation performance evaluation index system of the scientific research team is constructed. Secondly, the Internet of Things (IoT) is combined with a Field-Programmable Gate Array (FPGA) to build an innovation capability performance appraisal management terminal. Thirdly, a lightweight deep network has been built into the innovation ability performance assessment management network of university scientific research teams, which relates to the innovation performance assessment index system of scientific research teams. Finally, the system performance is tested. The results show that the proposed method has different degrees of compression for MobileNet, which can significantly reduce the network computation and retain the original recognition ability. Models whose Floating-Point Operations (FLOPs) are reduced by 70% to 90% have 3.6 to 14.3 times fewer parameters. Under different pruning rates, the proposed model has a higher model compression rate and recognition accuracy than other models. The results also show that the output of the results is closely related to the interests of the research team. The academic influence score of Team 1 is 0.17, which is the highest among the six groups in this experimental study, indicating that Team 1 has the most significant academic influence. These results provide certain data support and method reference for evaluating the innovation ability of scientific research teams in colleges and universities and contribute to the comprehensive development of efficient scientific research teams.}, } @article {pmid35437463, year = {2022}, author = {Wang, Z}, title = {An Intelligent Collection System of Big Data in Medical and Health Education Based on the Internet of Things.}, journal = {Journal of healthcare engineering}, volume = {2022}, number = {}, pages = {3735102}, pmid = {35437463}, issn = {2040-2309}, mesh = {Big Data ; Cloud Computing ; Computer Security ; Health Education ; Humans ; Internet ; *Internet of Things ; }, abstract = {The Internet of Medical Things has developed rapidly as an important direction in the field of the Internet of Things. In particular, enabled by new-generation information technology, theoretical and applied research on medical IoT intelligent health management that provides "full service" has become a research hotspot at many universities and research institutions.
Therefore, conducting research on intelligent health management in the Internet of Medical Things is of great engineering importance and provides theoretical guidance for improving the level of medical informatization. Centered on the goal of health management "everything, full spectrum, for everyone," this work analyzes the shortcomings of current health management, such as the lack of health information sharing and the lack of continuous monitoring and management of health indicators, and proposes a new "individual-family-community-hospital" four-level intelligent health management service model; the hardware architecture of intelligent healthcare management and the supporting software system have been built. Through methods such as real-time multi-source data collection, mobile sensing, cloud computing, and multi-network fusion technology, continuous monitoring and intelligent management of health data are realized in a convenient, fast, and efficient manner. The system addresses the inability of the existing medical system to meet multi-level health needs, as well as personal data security and privacy protection, achieving regionalized, multi-level, multi-center, real-time interactive health management covering the whole person, the whole process, and all aspects.}, } @article {pmid35433703, year = {2022}, author = {Pennington, A and King, ONF and Tun, WM and Ho, EML and Luengo, I and Darrow, MC and Basham, M}, title = {SuRVoS 2: Accelerating Annotation and Segmentation for Large Volumetric Bioimage Workflows Across Modalities and Scales.}, journal = {Frontiers in cell and developmental biology}, volume = {10}, number = {}, pages = {842342}, pmid = {35433703}, issn = {2296-634X}, abstract = {As sample preparation and imaging techniques have expanded and improved to include a variety of options for larger sized and numbers of samples, the bottleneck in volumetric imaging is now data analysis. Annotation and segmentation are both common, yet difficult, data analysis tasks which are required to bring meaning to the volumetric data. The SuRVoS application has been updated and redesigned to provide access to both manual and machine learning-based segmentation and annotation techniques, including support for crowd sourced data. Combining adjacent, similar voxels (supervoxels) provides a mechanism for speeding up segmentation both in the painting of annotation and by training a segmentation model on a small amount of annotation. The support for layers allows multiple datasets to be viewed and annotated together which, for example, enables the use of correlative data (e.g. crowd-sourced annotations or secondary imaging techniques) to guide segmentation. The ability to work with larger data on high-performance servers with GPUs has been added through a client-server architecture and the Pytorch-based image processing and segmentation server is flexible and extensible, and allows the implementation of deep learning-based segmentation modules. The client side has been built around Napari allowing integration of SuRVoS into an ecosystem for open-source image analysis while the server side has been built with cloud computing and extensibility through plugins in mind.
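SuRVoS 2's core speed-up, per the entry above, comes from annotating supervoxels rather than individual voxels and training a segmentation model on a small amount of painted annotation. The following Python sketch shows that general pattern on a synthetic volume, using SLIC supervoxels from a recent scikit-image and a random forest; it is an analogy to the approach, not the project's actual pipeline:

    import numpy as np
    from skimage.segmentation import slic
    from sklearn.ensemble import RandomForestClassifier

    rng = np.random.default_rng(0)
    volume = rng.normal(size=(40, 64, 64))
    volume[:, 20:40, 20:40] += 3.0     # a bright synthetic structure to segment
    volume = (volume - volume.min()) / (volume.max() - volume.min())

    # Group similar adjacent voxels into supervoxels (the unit of annotation).
    labels = slic(volume, n_segments=300, compactness=0.1, channel_axis=None)
    ids = np.unique(labels)
    feats = np.array([[volume[labels == i].mean(), volume[labels == i].std()]
                      for i in ids])

    # Sparse annotation: a handful of labeled supervoxels stand in for user strokes.
    seed_fg = ids[feats[:, 0] > 0.55][:5]
    seed_bg = ids[feats[:, 0] < 0.45][:5]
    sel = np.concatenate([seed_fg, seed_bg])
    X = feats[np.searchsorted(ids, sel)]
    y = np.concatenate([np.ones(len(seed_fg)), np.zeros(len(seed_bg))])

    # Train on the few annotated supervoxels, then label every supervoxel.
    clf = RandomForestClassifier(n_estimators=50, random_state=0).fit(X, y)
    voxel_mask = clf.predict(feats)[np.searchsorted(ids, labels)]
    print("foreground voxels:", int(voxel_mask.sum()))

Because the model sees a few hundred supervoxel feature vectors instead of millions of voxels, both the painting and the training loop stay interactive.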
Together, these improvements to SuRVoS provide a platform for accelerating the annotation and segmentation of volumetric and correlative imaging data across modalities and scales.}, } @article {pmid35432824, year = {2022}, author = {Mir, MH and Jamwal, S and Mehbodniya, A and Garg, T and Iqbal, U and Samori, IA}, title = {IoT-Enabled Framework for Early Detection and Prediction of COVID-19 Suspects by Leveraging Machine Learning in Cloud.}, journal = {Journal of healthcare engineering}, volume = {2022}, number = {}, pages = {7713939}, pmid = {35432824}, issn = {2040-2309}, mesh = {Algorithms ; Bayes Theorem ; *COVID-19/diagnosis ; Humans ; Machine Learning ; Support Vector Machine ; }, abstract = {COVID-19 has been the most dreaded and the most searched word since its outbreak in November 2019 across the globe. The world has to battle with it until an effective solution is developed. Due to the advancement in mobile and sensor technology, it is possible to come up with Internet of things-based healthcare systems. These novel healthcare systems can be proactive and preventive rather than traditional reactive healthcare systems. This article proposes a real-time IoT-enabled framework for the detection and prediction of COVID-19 suspects in early stages, by collecting symptomatic data and analyzing the nature of the virus in a better manner. The framework computes the presence of the COVID-19 virus by mining the health parameters collected in real time from sensors and other IoT devices. The framework comprises four main components: user system or data collection center, data analytic center, diagnostic system, and cloud system. To detect COVID-19 suspects in real time, this work employs five machine learning techniques, namely support vector machine (SVM), decision tree, naïve Bayes, logistic regression, and neural network. In our proposed framework, the real and primary dataset collected from SKIMS, Srinagar, is used to validate our work. The experiment on the primary dataset was conducted using different machine learning techniques on selected symptoms. The efficiency of the algorithms is calculated by computing the results of performance metrics such as accuracy, precision, recall, F1 score, root-mean-square error, and area under the curve score. The employed machine learning techniques have shown an accuracy of above 95% on the primary symptomatic data. Based on the experiment conducted, the proposed framework would be effective in the early identification and prediction of COVID-19 suspects, capturing the nature of the disease in a better way.}, } @article {pmid35432577, year = {2022}, author = {Mohamed Akram, K and Sihem, S and Okba, K and Harous, S}, title = {IoMT-fog-cloud based architecture for Covid-19 detection.}, journal = {Biomedical signal processing and control}, volume = {76}, number = {}, pages = {103715}, pmid = {35432577}, issn = {1746-8094}, abstract = {Nowadays, coronavirus disease 2019 (COVID-19) is a worldwide pandemic due to its mutation over time. Several works have addressed COVID-19 detection using different techniques; however, the use of small datasets and the lack of validation tests still limit their value. Also, they focus only on increasing the accuracy and precision of the model without giving attention to its complexity, which is one of the main constraints in healthcare applications.
Moreover, most cloud-based healthcare applications use a centralized transmission process for vast volumes of varied information, which makes the privacy and security of personal patient data easy to compromise. Furthermore, the traditional cloud architecture shows many weaknesses, such as high latency and low sustained performance.
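Stepping back to the IoT-enabled screening entry above (Mir et al.), which benchmarks five classifiers (SVM, decision tree, naive Bayes, logistic regression, and a neural network) on symptomatic records: a minimal sketch of such a comparison with scikit-learn, using synthetic data in place of the private SKIMS dataset, could read:

    from sklearn.datasets import make_classification
    from sklearn.model_selection import cross_val_score
    from sklearn.svm import SVC
    from sklearn.tree import DecisionTreeClassifier
    from sklearn.naive_bayes import GaussianNB
    from sklearn.linear_model import LogisticRegression
    from sklearn.neural_network import MLPClassifier

    # Synthetic stand-in for symptomatic records (the real dataset is not public).
    X, y = make_classification(n_samples=500, n_features=10, random_state=0)

    models = {
        "SVM": SVC(),
        "decision tree": DecisionTreeClassifier(random_state=0),
        "naive Bayes": GaussianNB(),
        "logistic regression": LogisticRegression(max_iter=1000),
        "neural network": MLPClassifier(max_iter=1000, random_state=0),
    }
    for name, model in models.items():
        acc = cross_val_score(model, X, y, cv=5, scoring="accuracy").mean()
        print(f"{name:>20s}: {acc:.3f}")

Cross-validated accuracy is only one of the reported metrics; precision, recall, F1, and AUC can be obtained the same way by changing the scoring argument.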

In our system, we used the Discrete Wavelet Transform (DWT) and Principal Component Analysis (PCA) together with different energy tracking methods, such as the Teager-Kaiser Energy Operator (TKEO), Shannon Wavelet Entropy Energy (SWEE), and Log Energy Entropy (LEE), to preprocess the dataset. In the first step, DWT is used to decompose the image into coefficients, where each coefficient is a vector of features. Then, we apply PCA to reduce the dimension by choosing the most essential features in the feature map. Moreover, we used TKEO, SWEE, and LEE to track the energy in the features in order to select the best and most optimal features and reduce the complexity of the model. We also used a CNN model that contains convolution and pooling layers due to its efficacy in image processing. Furthermore, we rely on deep neurons with small kernel windows, which provide better feature learning and minimize the model's complexity. The DWT-PCA technique with TKEO filtering showed great results in terms of noise measures, where the Peak Signal-to-Noise Ratio (PSNR) was 3.14 dB and the Signal-to-Noise Ratio (SNR) of the original and preprocessed images was 1.48 and 1.47, respectively, which guaranteed the performance of the filtering techniques. The experimental results of the CNN model confirm the high performance of the proposed system in classifying COVID-19, pneumonia, and normal cases with 97% accuracy, 100% precision, 97% recall, 99% F1-score, and 98% AUC.
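A compact sketch of one plausible arrangement of the preprocessing chain just described (2D DWT, an energy-tracking step, then PCA), assuming PyWavelets and scikit-learn; the wavelet, the kept fraction, and the component count are illustrative choices, not the paper's:

    import numpy as np
    import pywt
    from sklearn.decomposition import PCA

    rng = np.random.default_rng(0)
    images = rng.normal(size=(100, 64, 64))        # stand-in for chest X-ray images

    # 1) 2D DWT: keep the approximation sub-band of each image as a feature vector.
    feats = np.stack([pywt.dwt2(img, "db2")[0].ravel() for img in images])

    # 2) Teager-Kaiser energy, psi[n] = x[n]^2 - x[n-1]*x[n+1], along each feature
    #    vector; average across images and keep the most energetic positions
    #    (one plausible reading of the entry's "energy tracking" step).
    psi = np.abs(feats[:, 1:-1] ** 2 - feats[:, :-2] * feats[:, 2:])
    scores = psi.mean(axis=0)
    keep = 1 + np.argsort(scores)[scores.size // 2:]
    energetic = feats[:, keep]

    # 3) PCA: project the surviving coefficients onto a few principal directions.
    reduced = PCA(n_components=20).fit_transform(energetic)
    print(reduced.shape)    # (100, 20): compact input for a small CNN or classifier

Pruning low-energy coefficients before PCA is what keeps the downstream model small, which is the complexity concern the entry emphasizes for healthcare deployments.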

The use of DWT-PCA and TKEO optimizes the selection of the optimal features and reduces the complexity of the model. The proposed system achieves good results in identifying COVID-19, pneumonia, and normal cases. The implementation of fog computing as an intermediate layer solves the latency and computational cost problems, which improves the Quality of Service (QoS) of the cloud. Fog computing ensures the privacy and security of patients' data. With further refinement and validation, the IFC-Covid system will be a real-time, effective, user-friendly, and low-cost application for COVID-19 detection.}, } @article {pmid35430649, year = {2022}, author = {Farhadi, H and Mokhtarzade, M and Ebadi, H and Beirami, BA}, title = {Rapid and automatic burned area detection using sentinel-2 time-series images in google earth engine cloud platform: a case study over the Andika and Behbahan Regions, Iran.}, journal = {Environmental monitoring and assessment}, volume = {194}, number = {5}, pages = {369}, pmid = {35430649}, issn = {1573-2959}, mesh = {*Cloud Computing ; Environmental Monitoring ; Humans ; Iran ; Search Engine ; Water ; }, abstract = {For proper forest management, accurate detection and mapping of burned areas are needed, yet the practice is difficult due to the lack of appropriate methods and the time and expense involved. It is also critical to obtain accurate information about the density and distribution of burned areas in large forest and vegetated areas. For the most efficient and up-to-date mapping of large areas, remote sensing is one of the best technologies. However, the complex image scenario and the similar spectral behavior of classes in multispectral satellite images may lead to many false-positive mistakes, making it challenging to extract the burned areas accurately. This research aims to develop an automated framework in the Google Earth Engine (GEE) cloud computing platform for detecting burned areas in Andika and Behbahan, located in the south and southwest of Iran, using Sentinel-2 time-series images. After importing the images and applying the necessary preprocessing, the Sentinel-2 Burned Areas Index (BAIS2) was used to create a map of the Primary Burned Areas (PBA). Detection accuracy was then improved by masking out disturbing classes (vegetation and water) on the PBA map, which resulted in the Final Burned Areas (FBA). The unimodal method is used to calculate the ideal thresholds of the indices to make the proposed method automatic. The final results demonstrated that the proposed method performed well in both homogeneous and heterogeneous areas for detecting the burned areas. Based on a test dataset, maps of burned areas were produced in the Andika and Behbahan regions with an overall accuracy of 90.11% and 92.40% and a kappa coefficient of 0.87 and 0.88, respectively, which were highly accurate when compared to the BAIS2, Normalized Burn Ratio (NBR), Normalized Difference Vegetation Index (NDVI), Mid-Infrared Bispectral Index (MIRBI), and Normalized Difference SWIR (NDSWIR) indices.
Based on the results, accurate determination of vegetation classes and water zones and eliminating them from the map of burned areas led to a considerable increase in the accuracy of the obtained final map from the BAIS2 spectral index.}, } @article {pmid35428085, year = {2022}, author = {Yaacoby, R and Schaar, N and Kellerhals, L and Raz, O and Hermelin, D and Pugatch, R}, title = {Comparison between a quantum annealer and a classical approximation algorithm for computing the ground state of an Ising spin glass.}, journal = {Physical review. E}, volume = {105}, number = {3-2}, pages = {035305}, doi = {10.1103/PhysRevE.105.035305}, pmid = {35428085}, issn = {2470-0053}, abstract = {Finding the ground state of an Ising spin glass on general graphs belongs to the class of NP-hard problems, widely believed to have no efficient polynomial-time algorithms to solve them. An approach developed in computer science for dealing with such problems is to devise approximation algorithms; these are algorithms, whose run time scales polynomially with the input size, that provide solutions with provable guarantees on their quality in terms of the optimal unknown solution. Recently, several algorithms for the Ising spin-glass problem on a bounded degree graph that provide different approximation guarantees were introduced. D-Wave, a Canadian-based company, has constructed a physical realization of a quantum annealer and has enabled researchers and practitioners to access it via their cloud service. D-Wave is particularly suited for computing an approximation for the ground state of an Ising spin glass on its Chimera and Pegasus graphs-both with a bounded degree. To assess the quality of D-Wave's solution, it is natural to compare it to classical approximation algorithms specifically designed to solve the same problem. In this work, we compare the performance of a recently developed approximation algorithm to solve the Ising spin-glass problem on graphs of bounded degree against the performance of the D-Wave computer. We also compared the performance of D-Wave's computer in the Chimera architecture against the performance of a heuristic tailored specifically to handle the Chimera graph. We found that the D-Wave computer was able to find better approximations for all the random instances of the problem we studied-Gaussian weights, uniform weights, and discrete binary weights. Furthermore, the convergence times of D-Wave's computer were also significantly better. These results indicate the merit of D-Wave's computer under certain specific instances. 
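Returning to the burned-area mapping entry above (Farhadi et al.): all of the indices it compares reduce to band arithmetic followed by a data-driven threshold. A hedged sketch with the Normalized Burn Ratio on synthetic reflectances, where a crude mean-minus-standard-deviation cut stands in for the paper's unimodal thresholding:

    import numpy as np

    def nbr(nir, swir):
        """Normalized Burn Ratio, one of the indices compared in the entry."""
        return (nir - swir) / (nir + swir + 1e-9)

    rng = np.random.default_rng(0)
    nir = rng.uniform(0.1, 0.6, size=(100, 100))   # stand-in near-infrared band
    swir = rng.uniform(0.1, 0.6, size=(100, 100))  # stand-in shortwave-infrared band
    swir[40:70, 40:70] += 0.3                      # burned patches raise SWIR

    index = nbr(nir, swir)
    threshold = index.mean() - index.std()         # crude stand-in for the
    burned = index < threshold                     # unimodal threshold selection
    print("burned fraction:", burned.mean().round(3))

Masking out vegetation and water with analogous index thresholds before this step is what the entry credits for the final accuracy gain.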
More broadly, our method is relevant to a wider class of performance comparison studies, and we suggest that it is important to compare the performance of quantum computers not only against exact classical algorithms with exponential run-time scaling, but also against approximation algorithms with polynomial run-time scaling and a provable guarantee of performance.}, } @article {pmid35421313, year = {2022}, author = {Zhang, S and Thompson, JP and Xia, J and Bogetti, AT and York, F and Skillman, AG and Chong, LT and LeBard, DN}, title = {Mechanistic Insights into Passive Membrane Permeability of Drug-like Molecules from a Weighted Ensemble of Trajectories.}, journal = {Journal of chemical information and modeling}, volume = {62}, number = {8}, pages = {1891-1904}, pmid = {35421313}, issn = {1549-960X}, support = {R01 GM115805/GM/NIGMS NIH HHS/United States ; }, mesh = {Cell Membrane Permeability ; Diffusion ; *Lipid Bilayers ; Molecular Dynamics Simulation ; Permeability ; *Phosphatidylcholines ; }, abstract = {Passive permeability of a drug-like molecule is a critical property assayed early in a drug discovery campaign that informs a medicinal chemist how well a compound can traverse biological membranes, such as gastrointestinal epithelial or restrictive organ barriers, so it can perform a specific therapeutic function. However, the challenge that remains is the development of a method, experimental or computational, which can both determine the permeation rate and provide mechanistic insights into the transport process to help with the rational design of any given molecule. Typically, one of the following three methods are used to measure the membrane permeability: (1) experimental permeation assays acting on either artificial or natural membranes; (2) quantitative structure-permeability relationship models that rely on experimental values of permeability or related pharmacokinetic properties of a range of molecules to infer those for new molecules; and (3) estimation of permeability from the Smoluchowski equation, where free energy and diffusion profiles along the membrane normal are taken as input from large-scale molecular dynamics simulations. While all these methods provide estimates of permeation coefficients, they provide very little information for guiding rational drug design. In this study, we employ a highly parallelizable weighted ensemble (WE) path sampling strategy, empowered by cloud computing techniques, to generate unbiased permeation pathways and permeability coefficients for a set of drug-like molecules across a neat 1-palmitoyl-2-oleoyl-sn-glycero-3-phosphatidylcholine membrane bilayer. Our WE method predicts permeability coefficients that compare well to experimental values from an MDCK-LE cell line and PAMPA assays for a set of drug-like amines of varying size, shape, and flexibility. Our method also yields a series of continuous permeation pathways weighted and ranked by their associated probabilities. 
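The membrane-permeability entry above contrasts its weighted-ensemble trajectories with the common practice of estimating permeability from free energy and diffusivity profiles along the membrane normal. That estimate is the inhomogeneous solubility-diffusion integral, 1/P = ∫ exp(F(z)/kT)/D(z) dz, which a few lines of Python make concrete on synthetic profiles:

    import numpy as np

    kT = 0.593                                   # kcal/mol near 298 K
    z_cm = np.linspace(-30e-8, 30e-8, 601)       # membrane normal, 60 angstrom span
    F = 5.0 * np.exp(-(z_cm / 8e-8) ** 2 / 2.0)  # synthetic 5 kcal/mol central barrier
    D = np.full_like(z_cm, 1e-5)                 # diffusivity (cm^2/s), constant here

    integrand = np.exp(F / kT) / D               # local resistance to permeation
    resistance = np.sum(0.5 * (integrand[1:] + integrand[:-1]) * np.diff(z_cm))
    P = 1.0 / resistance
    print(f"P ~ {P:.2e} cm/s")

The appeal of the trajectory-based approach in the entry is that no such one-dimensional profile has to be assumed in the first place.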
Taken together, the ensemble of reactive permeation pathways, along with the estimate of the permeability coefficient, provides a clearer picture of the microscopic underpinnings of small-molecule membrane permeation.}, } @article {pmid35417349, year = {2023}, author = {Liu, Q and Su, H and Duanmu, Z and Liu, W and Wang, Z}, title = {Perceptual Quality Assessment of Colored 3D Point Clouds.}, journal = {IEEE transactions on visualization and computer graphics}, volume = {29}, number = {8}, pages = {3642-3655}, doi = {10.1109/TVCG.2022.3167151}, pmid = {35417349}, issn = {1941-0506}, mesh = {*Computer Graphics ; Databases, Factual ; *Multimedia ; Research Design ; }, abstract = {3D point clouds have found a wide variety of applications in multimedia processing, remote sensing, and scientific computing. Although most point cloud processing systems are developed to improve viewer experiences, little work has been dedicated to perceptual quality assessment of 3D point clouds. In this work, we build a new 3D point cloud database, namely the Waterloo Point Cloud (WPC) database. In contrast to existing datasets consisting of small-scale and low-quality source content of constrained viewing angles, the WPC database contains 20 high quality, realistic, and omni-directional source point clouds and 740 diversely distorted point clouds. We carry out a subjective quality assessment experiment over the database in a controlled lab environment. Our statistical analysis suggests that existing objective point cloud quality assessment (PCQA) models only achieve limited success in predicting subjective quality ratings. We propose a novel objective PCQA model based on an attention mechanism and a variant of information content-weighted structural similarity, which significantly outperforms existing PCQA models. The database has been made publicly available at https://github.com/qdushl/Waterloo-Point-Cloud-Database.}, } @article {pmid35411128, year = {2022}, author = {Wang, X and Carey, MJ and Tsotras, VJ}, title = {Subscribing to big data at scale.}, journal = {Distributed and parallel databases}, volume = {40}, number = {2-3}, pages = {475-520}, pmid = {35411128}, issn = {1573-7578}, abstract = {Today, data is being actively generated by a variety of devices, services, and applications. Such data is important not only for the information that it contains, but also for its relationships to other data and to interested users. Most existing Big Data systems focus on passively answering queries from users, rather than actively collecting data, processing it, and serving it to users. To satisfy both passive and active requests at scale, application developers need either to heavily customize an existing passive Big Data system or to glue one together with systems like Streaming Engines and Pub-sub services. Either choice requires significant effort and incurs additional overhead. In this paper, we present the BAD (Big Active Data) system as an end-to-end, out-of-the-box solution for this challenge. It is designed to preserve the merits of passive Big Data systems and introduces new features for actively serving Big Data to users at scale. 
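The contrast the BAD abstract draws between passively answering queries and actively serving data can be pictured with a toy publish-subscribe loop; the class below is a deliberately simplified illustration, not the BAD system's actual interface.

```python
import queue

class ActiveDataService:
    """Toy illustration of actively serving data: subscribers register a
    predicate once and matching items are pushed to them as they arrive,
    instead of users repeatedly polling with queries."""

    def __init__(self):
        self.subscribers = []  # (predicate, per-subscriber queue) pairs

    def subscribe(self, predicate):
        q = queue.Queue()
        self.subscribers.append((predicate, q))
        return q

    def publish(self, item):
        for predicate, q in self.subscribers:
            if predicate(item):   # deliver only data relevant to this user
                q.put(item)

svc = ActiveDataService()
inbox = svc.subscribe(lambda msg: msg.get("topic") == "flood-warning")
svc.publish({"topic": "flood-warning", "area": "riverside"})
print(inbox.get_nowait())  # the subscriber is notified without issuing a query
```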
We show the design and implementation of the BAD system, demonstrate how BAD facilitates providing both passive and active data services, investigate the BAD system's performance at scale, and illustrate the complexities that would result from instead providing BAD-like services with a "glued" system.}, } @article {pmid35408281, year = {2022}, author = {Filho, CP and Marques, E and Chang, V and Dos Santos, L and Bernardini, F and Pires, PF and Ochi, L and Delicato, FC}, title = {A Systematic Literature Review on Distributed Machine Learning in Edge Computing.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {7}, pages = {}, pmid = {35408281}, issn = {1424-8220}, support = {2015/24144-7//São Paulo Research Foundation/ ; E-26/200.938/2021//Fundação Carlos Chagas Filho de Amparo à Pesquisa do Estado do Rio de Janeiro/ ; }, mesh = {*Algorithms ; Intelligence ; *Machine Learning ; Publications ; }, abstract = {Distributed edge intelligence is a disruptive research area that enables the execution of machine learning and deep learning (ML/DL) algorithms close to where data are generated. Since edge devices are more limited and heterogeneous than typical cloud devices, many hindrances have to be overcome to fully extract the potential benefits of such an approach (such as data-in-motion analytics). In this paper, we investigate the challenges of running ML/DL on edge devices in a distributed way, paying special attention to how techniques are adapted or designed to execute on these restricted devices. The techniques under discussion pervade the processes of caching, training, inference, and offloading on edge devices. We also explore the benefits and drawbacks of these strategies.}, } @article {pmid35408246, year = {2022}, author = {Rakrouki, MA and Alharbe, N}, title = {QoS-Aware Algorithm Based on Task Flow Scheduling in Cloud Computing Environment.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {7}, pages = {}, pmid = {35408246}, issn = {1424-8220}, mesh = {*Algorithms ; *Cloud Computing ; Gravitation ; }, abstract = {This paper deals with the challenging problem of scheduling users' tasks, while taking into consideration users' quality of service (QoS) requirements, with the objective of reducing the energy consumption of physical machines. This paper presents a model to analyze the current state of the running tasks according to the results of the QoS prediction assigned by an ARIMA prediction model optimized with a Kalman filter. Then, we calculate a scheduling policy with combined particle swarm optimization (PSO) and gravitational search algorithm (GSA) methods according to the QoS status analysis. Experimental results show that the proposed HPSO algorithm reduces resource consumption by 16.51% compared to the original hybrid algorithm, and service-level agreement (SLA) violations are 0.053% lower when the optimized prediction model is used.}, } @article {pmid35408212, year = {2022}, author = {Ji, X and Wei, H and Chen, Y and Ji, XF and Wu, G}, title = {A Three-Stage Dynamic Assessment Framework for Industrial Control System Security Based on a Method of W-HMM.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {7}, pages = {}, pmid = {35408212}, issn = {1424-8220}, support = {2019YFB1312202//the National Key R&D Program of China/ ; }, mesh = {Algorithms ; *Artificial Intelligence ; Big Data ; *Cloud Computing ; Machine Learning ; }, abstract = {Industrial control systems (ICS) are applied in many fields. 
As the development of cloud computing, artificial intelligence, and big data analysis induces more cyberattacks, ICS constantly faces risks. If these risks materialize during system operations, corporate capital is endangered. It is crucial to assess the security of ICS dynamically. This paper proposes a dynamic assessment framework for industrial control system security (DAF-ICSS) based on machine learning and takes an industrial robot system as an example. The framework conducts security assessment from qualitative and quantitative perspectives, combining three assessment phases: static identification, dynamic monitoring, and security assessment. During the evaluation, we propose a weighted Hidden Markov Model (W-HMM) to dynamically establish the system's security model using the Baum-Welch algorithm. To verify the effectiveness of DAF-ICSS, we have compared it with two assessment methods for assessing industrial robot security. The comparison shows that the proposed DAF-ICSS provides a more accurate assessment. The assessment reflects the system's security state in a timely and intuitive manner. In addition, it can be used to analyze the security impact caused by unknown types of ICS attacks, since it infers the security state from the explicit state of the system.}, } @article {pmid35408111, year = {2022}, author = {Laiton-Bonadiez, C and Branch-Bedoya, JW and Zapata-Cortes, J and Paipa-Sanabria, E and Arango-Serna, M}, title = {Industry 4.0 Technologies Applied to the Rail Transportation Industry: A Systematic Review.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {7}, pages = {}, pmid = {35408111}, issn = {1424-8220}, mesh = {*Artificial Intelligence ; Big Data ; Cloud Computing ; *Internet of Things ; Technology ; }, abstract = {BACKGROUND: Industry 4.0 technologies have been widely used in the railway industry, focusing mainly on maintenance and control tasks necessary in the railway infrastructure. Given the great potential that these technologies offer, the scientific community has come to use them in varied ways to solve a wide range of problems such as train failures, train station security, rail system control and communication in hard-to-reach areas, among others. For this reason, this paper aims to answer the following research questions: what are the main issues in the railway transport industry, what are the technological strategies that are currently being used to solve these issues, and what are the technologies from industry 4.0 that are used in the railway transport industry to solve the aforementioned issues?

METHODS: This study adopts a systematic literature review approach. We searched the Science Direct and Web of Science databases for the period from January 2017 to November 2021. Studies published in conferences or journals and written in English or Spanish were included for the initial evaluation. The initially included papers were analyzed by the authors and selected based on whether they helped answer the proposed research questions.

RESULTS: Of the 515 recovered articles, 109 were eligible, from which we identified three main application domains in the railway industry: monitoring, decision and planning techniques, and communication and security. Regarding industry 4.0 technologies, we identified 9 different technologies applied in the reviewed studies: Artificial Intelligence (AI), Internet of Things (IoT), Cloud Computing, Big Data, Cybersecurity, Modelling and Simulation, Smart Decision Support Systems (SDSS), Computer Vision, and Virtual Reality (VR). This study is, to our knowledge, one of the first to show how industry 4.0 technologies are currently being used to tackle railway industry problems, and to map current application trends in the scientific community, which is highly useful for the development of future studies and more advanced solutions.

FUNDING: Colombian national organizations Minciencias and the Mining-Energy Planning Unit.}, } @article {pmid35408047, year = {2022}, author = {Wang, Z and Wang, W and Zhang, Z and Hu, F and Xia, X and Chen, L}, title = {DeepEdge: A Novel Appliance Identification Edge Platform for Data Gathering, Capturing and Labeling.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {7}, pages = {}, pmid = {35408047}, issn = {1424-8220}, support = {62072319//National Natural Science Foundation of China/ ; 2019JDTD0001//Sichuan Science and Technology Program/ ; 2018YFB1601200//National Key Research and Development Program of China/ ; 2018YFB1601201//National Key Research and Development Program of China/ ; 2021CDLZ-11//Luzhou Science and Technology Innovation R&D Program/ ; 2021-YF05-02000-SN//Chengdu Technology Innovation R&D Program/ ; }, mesh = {*Algorithms ; }, abstract = {With the development of the Internet of Things for the smart grid, appliance monitoring has become an important topic. The first and most important step in appliance monitoring is to identify the type of appliance. Most of the existing appliance identification platforms are cloud based and thus consume large amounts of computing resources and memory. Therefore, it is necessary to explore a low-cost edge identification platform. In this work, a novel appliance identification edge platform for data gathering, capturing and labeling is proposed. Experiments show that this platform can achieve an average appliance identification accuracy of 98.5% and improve the accuracy of non-intrusive load disaggregation algorithms.}, } @article {pmid35405588, year = {2022}, author = {Feng, X and Jin, X and Zhou, R and Jiang, Q and Wang, Y and Zhang, X and Shang, K and Zhang, J and Yu, C and Shou, J}, title = {Deep learning approach identified a gene signature predictive of the severity of renal damage caused by chronic cadmium accumulation.}, journal = {Journal of hazardous materials}, volume = {433}, number = {}, pages = {128795}, doi = {10.1016/j.jhazmat.2022.128795}, pmid = {35405588}, issn = {1873-3336}, mesh = {Animals ; *Cadmium/metabolism ; Cadmium Chloride/toxicity ; *Deep Learning ; Kidney/metabolism ; Mice ; Mice, Inbred C57BL ; Oxidative Stress ; }, abstract = {Epidemiology studies have indicated that environmental cadmium exposure, even at low levels, will result in chronic cadmium accumulation in the kidney with profound adverse consequences, and that the diabetic population is more susceptible. However, the underlying mechanisms are not yet fully understood. In the present study, we applied an animal model to study chronic cadmium exposure-induced renal injury and performed whole transcriptome profiling studies. Repetitive CdCl2 exposure resulted in cadmium accumulation and remarkable renal injuries in the animals. The diabetic ob/ob mice manifested increased severity of renal injury compared with the wild type C57BL/6 J littermate controls. RNA-Seq data showed that cadmium treatment induced dramatic gene expression changes in a dose-dependent manner. The differentially expressed genes include the apoptosis hallmark genes, which significantly demarcated the treatment effects. Pathway enrichment and network analyses revealed biological oxidation (mainly glucuronidation) as one of the major stress responses induced by cadmium treatment. 
We next implemented a deep learning algorithm in conjunction with cloud computing and discovered a gene signature that can predict the degree of renal injury induced by cadmium treatment. The present study provided, for the first time, a comprehensive mechanistic understanding of chronic cadmium-induced nephrotoxicity in normal and diabetic populations at the whole genome level.}, } @article {pmid35401026, year = {2022}, author = {Ullah, A and Chakir, A}, title = {Improvement for tasks allocation system in VM for cloud datacenter using modified bat algorithm.}, journal = {Multimedia tools and applications}, volume = {81}, number = {20}, pages = {29443-29457}, pmid = {35401026}, issn = {1380-7501}, abstract = {Since its inception, cloud computing has greatly transformed our lives by connecting the entire world through shared computational resources over the internet. The COVID-19 pandemic has also disrupted traditional learning and businesses and led us towards an era of cloud-based activities. The virtual machine (VM) is one of the main elements of virtualization in cloud computing, representing a physical server as a virtual machine. Utilizing these VMs effectively is important for achieving an effective task scheduling mechanism in a cloud environment. This paper focuses on improving the task distribution system across VMs for cloud computing using a load balancing technique. To that end, the fitness function value of the Bat algorithm, which is used in the load balancer section, was modified; because tasks are distributed among the different VMs once the algorithm's iterations are complete, this section of the algorithm was modified as well. The second modification took place in the Bat search process, in the dimension section. The proposed algorithm is known as the modified Bat algorithm. Four parameters are used to check the performance of the system: throughput, makespan, degree of imbalance, and processing time. The proposed algorithm provides more efficient results compared to other standard techniques. Hence, the proposed algorithm improved cloud data center accuracy and efficiency.}, } @article {pmid35396141, year = {2022}, author = {Agrawal, N and Kumar, R}, title = {Security Perspective Analysis of Industrial Cyber Physical Systems (I-CPS): A Decade-wide Survey.}, journal = {ISA transactions}, volume = {130}, number = {}, pages = {10-24}, doi = {10.1016/j.isatra.2022.03.018}, pmid = {35396141}, issn = {1879-2022}, abstract = {Considering the exceptional growth of Cyber Physical Systems (CPSs), multiple and potentially grave security challenges have emerged in this field. New-generation CPSs, such as Industrial CPS (I-CPS), face a range of vulnerabilities and attacks. The underlying non-uniform standards, device heterogeneity, network complexity, etc., make it difficult to offer systematized coverage of CPS security in an industrial environment. This work considers the security perspective of I-CPSs, and offers a decade-wide survey including different vulnerabilities, attacks, CPS components, and various other aspects. A comparative year-wise analysis of the existing works with respect to objective, approach, testbed used, and derived inference is also presented over a decade. Additionally, the work details different security issues and research challenges present in I-CPS. This work attempts to offer a concise and precise literature study focused on state-of-the-art I-CPS security. 
This work also encourages the young researchers to explore the wide possibilities present in this emerging field.}, } @article {pmid35395654, year = {2022}, author = {Banerjee, AN}, title = {Green syntheses of graphene and its applications in internet of things (IoT)-a status review.}, journal = {Nanotechnology}, volume = {33}, number = {32}, pages = {}, doi = {10.1088/1361-6528/ac6599}, pmid = {35395654}, issn = {1361-6528}, abstract = {Internet of Things (IoT) is a trending technological field that converts any physical object into a communicable smarter one by converging the physical world with the digital world. This innovative technology connects the device to the internet and provides a platform to collect real-time data, cloud storage, and analyze the collected data to trigger smart actions from a remote location via remote notifications, etc. Because of its wide-ranging applications, this technology can be integrated into almost all the industries. Another trending field with tremendous opportunities is Nanotechnology, which provides many benefits in several areas of life, and helps to improve many technological and industrial sectors. So, integration of IoT and Nanotechnology can bring about the very important field of Internet of Nanothings (IoNT), which can re-shape the communication industry. For that, data (collected from trillions of nanosensors, connected to billions of devices) would be the 'ultimate truth', which could be generated from highly efficient nanosensors, fabricated from various novel nanomaterials, one of which is graphene, the so-called 'wonder material' of the 21st century. Therefore, graphene-assisted IoT/IoNT platforms may revolutionize the communication technologies around the globe. In this article, a status review of the smart applications of graphene in the IoT sector is presented. Firstly, various green synthesis of graphene for sustainable development is elucidated, followed by its applications in various nanosensors, detectors, actuators, memory, and nano-communication devices. Also, the future market prospects are discussed to converge various emerging concepts like machine learning, fog/edge computing, artificial intelligence, big data, and blockchain, with the graphene-assisted IoT field to bring about the concept of 'all-round connectivity in every sphere possible'.}, } @article {pmid35394342, year = {2022}, author = {Dall'Alba, G and Casa, PL and Abreu, FP and Notari, DL and de Avila E Silva, S}, title = {A Survey of Biological Data in a Big Data Perspective.}, journal = {Big data}, volume = {10}, number = {4}, pages = {279-297}, doi = {10.1089/big.2020.0383}, pmid = {35394342}, issn = {2167-647X}, mesh = {*Big Data ; Cloud Computing ; *Data Mining/methods ; Machine Learning ; Neural Networks, Computer ; }, abstract = {The amount of available data is continuously growing. This phenomenon promotes a new concept, named big data. The highlight technologies related to big data are cloud computing (infrastructure) and Not Only SQL (NoSQL; data storage). In addition, for data analysis, machine learning algorithms such as decision trees, support vector machines, artificial neural networks, and clustering techniques present promising results. In a biological context, big data has many applications due to the large number of biological databases available. 
Some limitations of biological big data are related to the inherent features of these data, such as high degrees of complexity and heterogeneity, since biological systems provide information from an atomic level to interactions between organisms or their environment. Such characteristics make most bioinformatic-based applications difficult to build, configure, and maintain. Although the rise of big data is relatively recent, it has contributed to a better understanding of the underlying mechanisms of life. The main goal of this article is to provide a concise and reliable survey of the application of big data-related technologies in biology. As such, some fundamental concepts of information technology, including storage resources, analysis, and data sharing, are described along with their relation to biological data.}, } @article {pmid35392801, year = {2022}, author = {Pallotta, S and Cascianelli, S and Masseroli, M}, title = {RGMQL: scalable and interoperable computing of heterogeneous omics big data and metadata in R/Bioconductor.}, journal = {BMC bioinformatics}, volume = {23}, number = {1}, pages = {123}, pmid = {35392801}, issn = {1471-2105}, support = {693174//h2020 european research council/ ; }, mesh = {Big Data ; Cloud Computing ; Genomics ; *Metadata ; *Software ; }, abstract = {BACKGROUND: Heterogeneous omics data, increasingly collected through high-throughput technologies, can contain hidden answers to very important and still unsolved biomedical questions. Their integration and processing are crucial mostly for tertiary analysis of Next Generation Sequencing data, although suitable big data strategies still address mainly primary and secondary analysis. Hence, there is a pressing need for algorithms specifically designed to explore big omics datasets, capable of ensuring scalability and interoperability, possibly relying on high-performance computing infrastructures.

RESULTS: We propose RGMQL, an R/Bioconductor package conceived to provide a set of specialized functions to extract, combine, process and compare omics datasets and their metadata from different and differently localized sources. RGMQL is built over the GenoMetric Query Language (GMQL) data management and computational engine, and can leverage its open curated repository as well as its cloud-based resources, with the possibility of outsourcing computational tasks to GMQL remote services. Furthermore, it overcomes the limits of the GMQL declarative syntax by offering a procedural approach to dealing with omics data within the R/Bioconductor environment. Most importantly, it provides full interoperability with other packages of the R/Bioconductor framework and extensibility over the most used genomic data structures and processing functions.

CONCLUSIONS: RGMQL is able to combine the query expressiveness and computational efficiency of GMQL with a complete processing flow in the R environment, being a fully integrated extension of the R/Bioconductor framework. Here we provide three fully reproducible example use cases of biological relevance that are particularly explanatory of its flexibility of use and interoperability with other R/Bioconductor packages. They show how RGMQL can easily scale up from local to parallel and cloud computing while it combines and analyzes heterogeneous omics data from local or remote datasets, both public and private, in a completely transparent way to the user.}, } @article {pmid35387274, year = {2021}, author = {Reza, MNH and Jayashree, S and Malarvizhi, CAN and Rauf, MA and Jayaraman, K and Shareef, SH}, title = {The implications of Industry 4.0 on supply chains amid the COVID-19 pandemic: a systematic review.}, journal = {F1000Research}, volume = {10}, number = {}, pages = {1008}, pmid = {35387274}, issn = {2046-1402}, mesh = {*COVID-19/epidemiology ; Disease Outbreaks ; Humans ; Pandemics ; Technology ; }, abstract = {Background: COVID-19 has caused significant disruptions in supply chains. It has increased the demand for products and decreased the supply of raw materials. This has interrupted many production processes. The emerging technologies of Industry 4.0 have the potential to streamline supply chains by improving time-sensitive customized solutions during this emergency. Purpose: The study identifies the core technologies of Industry 4.0 and the role and impact of these technologies in managing the disruption caused by the COVID-19 outbreak in strengthening the supply chain resilience. Design/methodology/approach: An extensive literature review using the "Preferred Reporting Items for Systematic Review and Meta-Analysis" method was carried out on the impact of the COVID-19 pandemic on supply chains and Industry 4.0 technologies. The study was undertaken by selecting keywords validated by experts, and a search was conducted in the Scopus, ProQuest, and Google Scholar databases. Publications from the leading journals on these topics were selected. The bibliographical search resulted in 1484 articles, followed by multiple layers of filtering. Finally, the most pertinent articles were selected for review, and a total of 42 articles were analyzed. Findings: The findings of the study showed that the majority of the articles emphasized the digitalization of supply chain management, acknowledging the fundamentals, applications, and prospects, revealing the drivers and challenges of Industry 4.0 technologies to manage disruptions. Most of the authors identified IoT, big data, cloud computing, additive manufacturing, and blockchain to maintain the supply chain resilience. Originality/value: Existing literature on epidemics lacks the basics and practices of utilizing Industry 4.0 technologies in the supply chain recovery process. To fill this research gap, the study summarizes the potential of Industry 4.0 technologies to lessen supply chain disruptions caused by COVID-19. 
The study findings are valuable for policymakers and practitioners and contribute to supply chain management studies.}, } @article {pmid35387251, year = {2022}, author = {Jain, A and Nadeem, A and Majdi Altoukhi, H and Jamal, SS and Atiglah, HK and Elwahsh, H}, title = {Personalized Liver Cancer Risk Prediction Using Big Data Analytics Techniques with Image Processing Segmentation.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {8154523}, pmid = {35387251}, issn = {1687-5273}, mesh = {Algorithms ; Cloud Computing ; *Data Science ; Humans ; Image Processing, Computer-Assisted ; *Liver Neoplasms/diagnostic imaging ; }, abstract = {Data analytics is a massively parallel processing technology that may be used to forecast a wide range of illnesses. Many scientific research methodologies have the problem of requiring a significant amount of time and processing effort, which has a negative impact on the overall performance of the system. Virtual screening (VS) is a drug discovery approach that makes use of big data techniques. This approach is utilised for the development of novel drugs, and it is a time-consuming procedure that includes the docking of ligands from several databases against the protein receptor. The proposed work is divided into two modules: image processing-based cancer segmentation, and analysis of the extracted features using big data analytics. This statistical approach is critical in the development of new drugs for the treatment of liver cancer. Machine learning methods were utilised in the prediction of liver cancer, including the MapReduce and Mahout algorithms, which were used to prefilter the set of ligand filaments before they were used in the prediction of liver cancer. This work proposes the SMRF algorithm, an improved scalable random forest algorithm built on the MapReduce foundation. Using a computer cluster or cloud computing environment, this new method categorises massive datasets. With SMRF, small amounts of data are processed and optimised over a large number of computers, allowing for the highest possible throughput. When compared to the standard random forest method, the testing findings reveal that the SMRF algorithm exhibits the same level of accuracy deterioration but superior overall performance. Performance metrics analysis shows an accuracy in the range of 80 percent for the liver cancer prediction performed in this study.}, } @article {pmid35378813, year = {2022}, author = {Jiang, M and Sun, Y}, title = {An Optimized Decision Method for Smart Teaching Effect Based on Cloud Computing and Deep Learning.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {6907172}, pmid = {35378813}, issn = {1687-5273}, mesh = {*Cloud Computing ; *Deep Learning ; Humans ; Students ; Universities ; }, abstract = {In order to improve the effect of intelligent teaching and give full play to the role of intelligent technology in modern physical education, in this paper, cloud computing and deep learning methods are used to comprehensively evaluate the teaching effect of colleges and universities, and to calculate the evaluation effect and accuracy. 
The cloud computing and deep learning algorithms combine the teaching evaluation scale, teaching content, and characteristics to formulate teaching plans for different students and realize targeted teaching evaluation. The results show that the teaching evaluation method proposed in this paper can improve students' learning interest by about 30% and enhance learning initiative by about 20%, while the matching rate between the actual teaching effect and the expected requirements is 98%. Therefore, the cloud computing and deep learning model can improve the accuracy of teaching effect evaluation in colleges and universities, provide support for the formulation of teaching evaluation schemes, and promote the development of intelligent teaching in colleges and universities.}, } @article {pmid35371335, year = {2022}, author = {Almurisi, N and Tadisetty, S}, title = {Cloud-based virtualization environment for IoT-based WSN: solutions, approaches and challenges.}, journal = {Journal of ambient intelligence and humanized computing}, volume = {13}, number = {10}, pages = {4681-4703}, pmid = {35371335}, issn = {1868-5137}, abstract = {Internet of Things (IoT) is an ever-growing technology that enables advanced communication among millions of various devices to provide ubiquitous services without human intervention. The potential growth of electronic devices in sensing systems has led to the realization of the IoT paradigm, where applications depend on sensors to interact with the environment and collect data in a real-time scenario. Nowadays, smart applications require fast data acquisition, parallel processing, and dynamic resource sharing. Unfortunately, these requirements cannot be supported efficiently by traditional Wireless Sensor Networks (WSN) due to the deficiency of computing resources and the lack of resource-sharing. Therefore, it is not recommended to develop innovative applications based on these constrained devices without further enhancement and improvement. Hence, this article explores a cost-effective solution based on Cloud Computing and Virtualization Techniques to address these challenges. Cloud computing provides efficient computing resources and huge storage space, while the virtualization technique allows resources to be virtualized and shared between various applications. Integrating IoT-WSN with the Cloud-based Virtualization Environment will eliminate the drawbacks and limitations of conventional networks and facilitate the development of novel applications in a more flexible way. Furthermore, this article reviews the recent trends in IoT-WSN, virtualization techniques, and cloud computing. Also, we present the integration process of sensor networks with Cloud-based Virtualization and propose a new general architecture view for the Sensor-Cloud paradigm, and discuss its key elements, basic principles, lifecycle operation, and outline its advantages and disadvantages. Finally, we review the state-of-the-art, present the major challenges, and suggest future work directions.}, } @article {pmid35371196, year = {2022}, author = {Shang, R and Qin, Y}, title = {Research on Humanistic Quality Higher Medical Education Based on Internet of Things and Intelligent Computing.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {8633190}, pmid = {35371196}, issn = {1687-5273}, mesh = {*Education, Medical ; Humans ; *Internet of Things ; }, abstract = {The importance of the humanities in promoting economic and social development is becoming increasingly clear. 
Combining humanities with higher medical education in order to meet the needs of medical talent training in the new situation has become a key component of higher medical education reform and development. Adult higher medical education is an integral part of higher medical education, but it has different training objectives and training targets than regular higher medical education. These technological advancements are certain to hasten the continued emergence of the education cloud and industry clouds, create a good information-based environment for improving education informatization, and pose technical challenges to resource allocation in intelligent computing environments. Humanistic quality higher medical education based on the Internet of Things and intelligent computing makes the efficient intelligent information system more open, interactive, and coordinated, allowing students and teachers to perceive a variety of teaching resources more comprehensively.}, } @article {pmid35371194, year = {2022}, author = {Ma, S and Hao, F and Lin, Y and Liang, Y}, title = {The Construction of Big Data Computational Intelligence System for E-Government in Cloud Computing Environment and Its Development Impact.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {7295060}, pmid = {35371194}, issn = {1687-5273}, mesh = {Algorithms ; Artificial Intelligence ; *Big Data ; *Cloud Computing ; Government ; }, abstract = {The filling and classification algorithms of traditional E-government big data systems have low accuracy and poor work efficiency. With the development and wide application of big data, the internet of things, and other technologies, the integration of information resources has become the key to information construction. In the process of information resource integration, there are still outstanding problems, such as an incomplete government information resource system, differing standards in the construction of government information resource management systems, and serious threats to network and information security. To solve these problems, a new filling and classification algorithm for E-government big data is studied in the cloud computing environment: E-government big data filling is carried out on the basis of complete compatibility theory, and the E-government big data computational intelligence system in the cloud computing environment is constructed and its development impact examined, so as to parallelize the data, classify the data through decision trees, and realize parallelized incremental updating of the decision forest. To verify the effectiveness of the method, comparative experiments were set up; for the randomly built classification model of the first experiment, the results demonstrate that, according to the decision forest algorithm, the optimal number of decision trees is 24.}, } @article {pmid35370340, year = {2022}, author = {Kombaya Touckia, J and Hamani, N and Kermad, L}, title = {Digital twin framework for reconfigurable manufacturing systems (RMSs): design and simulation.}, journal = {The International journal, advanced manufacturing technology}, volume = {120}, number = {7-8}, pages = {5431-5450}, pmid = {35370340}, issn = {0268-3768}, abstract = {Faced with the global crisis of COVID-19 and the strong increase in customer demands, competition is becoming more intense between companies, on the one hand, and supply chains on the other. This competition has led to the development of new strategies to manage demand and increase market share. 
Among these strategies are the growing interest in sustainable manufacturing and the need for customizable products, which together create an increasingly complex manufacturing environment of heightened competition and constant change. Indeed, companies are trying to establish more flexible and agile manufacturing systems through several reconfiguration mechanisms. Reconfiguration contributes to extending the manufacturing system's life cycle by modifying its physical, organizational and IT characteristics according to changing market conditions. Due to the rapid development of new information technology (such as IoT, Big Data analytics, cyber-physical systems, cloud computing and artificial intelligence), digital twins have become intensively used in smart manufacturing. This paper proposes a digital twin design and simulation model for reconfigurable manufacturing systems (RMSs).}, } @article {pmid35369530, year = {2022}, author = {Taniguchi, Y and Ikegami, Y and Fujikawa, H and Pathare, Y and Kutics, A and Massimo, B and Anisetti, M and Damiani, E and Sakurai, Y and Tsuruta, S}, title = {Counseling (ro)bot as a use case for 5G/6G.}, journal = {Complex & intelligent systems}, volume = {8}, number = {5}, pages = {3899-3917}, pmid = {35369530}, issn = {2198-6053}, abstract = {This paper presents a counseling (ro)bot called Visual Counseling Agent (VICA) which focuses on remote mental healthcare. It is an agent system leveraging artificial intelligence (AI) to aid mentally distressed persons through speech conversation. The system terminals are connected to servers over the Internet, exploiting Cloud-nativeness, so that anyone who has any type of terminal can use it from anywhere. Despite a promising voice communication interface, VICA shows limitations in conversation continuity on conventional 4G networks. Concretely, the use of current 4G networks produces word dropping, delayed responses, and occasional connection failures. The objective of this paper is to mitigate these issues by leveraging a 5G/6G slice inclusive of mobile/multi-access edge computing (MEC). First, we propose and partly implement enhanced and advanced versions of VICA. Servers of the enhanced version collaborate to increase speech recognition reliability. Although it significantly increases the generated data volume, the advanced version enables recognition of facial expressions to greatly enhance counseling quality. Then, we propose a quality assurance mechanism using multiple levels of catalog, as well as a 5G/6G slice inclusive of MEC, and conduct experiments to uncover issues related to 4G. 
Results indicate that the number of speech recognition errors in Internet Cloud is more than twofold compared to edge computing, implying that quality assurance using 5G/6G in conjunction with VICA Counseling (ro)bot has higher efficiency.}, } @article {pmid35368952, year = {2022}, author = {Chen, Y and Wang, J and Gao, W and Yu, D and Shou, X}, title = {Construction and Clinical Application Effect of General Surgery Patient-Oriented Nursing Information Platform Using Cloud Computing.}, journal = {Journal of healthcare engineering}, volume = {2022}, number = {}, pages = {8273701}, pmid = {35368952}, issn = {2040-2309}, mesh = {*Cloud Computing ; Humans ; *Software ; Technology ; }, abstract = {The paper aims to build a nursing information platform (NIP) for general surgery (GS) patients and explore its clinical application effect based on cloud computing (CC) technology. Specifically, the present work first analyzes and expounds on the characteristics of GS patients, the CC concept, the three-tier service mode of CC, and the cloud data center (CDC). Secondly, based on the principle of the overall system design, the evaluation indexes of medical care end, patient end, family end, and management end are constructed using Visual Studio 2010. Thirdly, the expert evaluation and user evaluation methods are selected to analyze the clinical application effect of the proposed system. Finally, SPSS is used to analyze the effect of the proposed system. The results of the first and second rounds of the expert evaluation show that the authority coefficient of experts is greater than 0.7, which indicates that the degree of expert authority is good. The proposed CC-based GS patient-oriented NIP system is universal. The evaluation results of 20 users have shown 15 doctors and nurses, 14 patients, and 18 family members, who mostly still support applying the proposed CC-based GS patient-oriented NIP system and believe that the system brings convenience and improves work efficiency. In short, more incentives should be taken to build a NIP for GS patients.}, } @article {pmid35368911, year = {2022}, author = {Hayyolalam, V and Otoum, S and Özkasap, Ö}, title = {Dynamic QoS/QoE-aware reliable service composition framework for edge intelligence.}, journal = {Cluster computing}, volume = {25}, number = {3}, pages = {1695-1713}, pmid = {35368911}, issn = {1386-7857}, abstract = {Edge intelligence has become popular recently since it brings smartness and copes with some shortcomings of conventional technologies such as cloud computing, Internet of Things (IoT), and centralized AI adoptions. However, although utilizing edge intelligence contributes to providing smart systems such as automated driving systems, smart cities, and connected healthcare systems, it is not free from limitations. There exist various challenges in integrating AI and edge computing, one of which is addressed in this paper. Our main focus is to handle the adoption of AI methods on resource-constrained edge devices. In this regard, we introduce the concept of Edge devices as a Service (EdaaS) and propose a quality of service (QoS) and quality of experience (QoE)-aware dynamic and reliable framework for AI subtasks composition. The proposed framework is evaluated utilizing three well-known meta-heuristics in terms of various metrics for a connected healthcare application scenario. The experimental results confirm the applicability of the proposed framework. 
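To make the composition search concrete, the following Python sketch applies simulated annealing, one of the three meta-heuristics evaluated, to a toy subtask-to-edge-device assignment; the cost matrix and cooling schedule are illustrative assumptions, not the paper's QoS/QoE model.

```python
import math
import random

def anneal_assignment(costs, steps=5000, t0=1.0, cooling=0.999, seed=0):
    # costs[i][j]: hypothetical QoS/QoE penalty of running AI subtask i on edge device j.
    rng = random.Random(seed)
    n_tasks, n_devices = len(costs), len(costs[0])
    assign = [rng.randrange(n_devices) for _ in range(n_tasks)]
    cost = sum(costs[i][assign[i]] for i in range(n_tasks))
    t = t0
    for _ in range(steps):
        i, j = rng.randrange(n_tasks), rng.randrange(n_devices)
        delta = costs[i][j] - costs[i][assign[i]]
        # Accept improvements always; accept worse moves with Boltzmann probability.
        if delta < 0 or rng.random() < math.exp(-delta / t):
            assign[i] = j
            cost += delta
        t *= cooling  # geometric cooling schedule
    return assign, cost

toy_costs = [[3.0, 1.0, 2.5], [0.5, 2.0, 1.5], [2.0, 2.0, 0.2]]
print(anneal_assignment(toy_costs))
```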
Moreover, the results reveal that black widow optimization (BWO) can handle the issue more efficiently than particle swarm optimization (PSO) and simulated annealing (SA). The overall efficiency of BWO over PSO is 95%, and BWO outperforms SA with 100% efficiency. This means that BWO prevails over SA and PSO in all and in 95% of the experiments, respectively.}, } @article {pmid35365721, year = {2022}, author = {Osipov, V and Zhukova, N and Subbotin, A and Glebovskiy, P and Evnevich, E}, title = {Intelligent escalator passenger safety management.}, journal = {Scientific reports}, volume = {12}, number = {1}, pages = {5506}, pmid = {35365721}, issn = {2045-2322}, mesh = {Algorithms ; *Elevators and Escalators ; *Neural Networks, Computer ; Safety Management ; Software ; }, abstract = {This article addresses an approach to intelligent safety control of passengers on escalators. The aim is to improve the accuracy of detecting threatening situations on escalators in the subway to make decisions to prevent threats and eliminate the consequences. The novelty of the approach lies in the complex processing of information from three types of sources (video, audio, sensors) using machine learning methods and recurrent neural networks with controlled elements. The conditions and indicators of safety assurance efficiency are clarified. New methods and algorithms for managing the safety of passengers on escalators are proposed. The architecture of a promising safety software system is developed, and implementation of its components for cloud and fog computing environments is provided. Modeling results confirm the capabilities and advantages of the proposed technological solutions for enhancing the safety of escalator passengers, efficiency of control decision making, and system usability. With the proposed solutions, the speed of identifying situations increases by a factor of 3.5 and the accuracy of their determination by 26%. The efficiency of decision making has increased by almost 30%.}, } @article {pmid35360481, year = {2022}, author = {Yang, Y and Chang, Q and Chen, J and Zhou, X and Xue, Q and Song, A}, title = {Construction of a Health Management Model for Early Identification of Ischaemic Stroke in Cloud Computing.}, journal = {Journal of healthcare engineering}, volume = {2022}, number = {}, pages = {1018056}, pmid = {35360481}, issn = {2040-2309}, mesh = {*Brain Ischemia/diagnosis ; Cloud Computing ; Humans ; *Ischemic Stroke ; *Stroke/diagnosis ; }, abstract = {Knowledge discovery and cloud computing can help early identification of ischaemic stroke and provide intelligent, humane, and preventive healthcare services for patients at high risk of stroke. This study proposes constructing a health management model for early identification and warning of ischaemic stroke based on IoT and cloud computing, and discusses its rationale, design ideas, and research content, so as to provide a reference for stroke health management, to develop and implement countermeasures, and to compare the awareness of early stroke symptoms and first aid knowledge among stroke patients and their families before and after the activity. 
The rate of awareness of early symptoms and first aid among stroke patients and their families increased from 36% before the activity to 78% after it, a statistically significant difference (P < 0.05).}, } @article {pmid35360368, year = {2022}, author = {Brotherton, T and Brotherton, S and Ashworth, H and Kadambi, A and Ebrahim, H and Ebrahim, S}, title = {Development of an Offline, Open-Source, Electronic Health Record System for Refugee Care.}, journal = {Frontiers in digital health}, volume = {4}, number = {}, pages = {847002}, pmid = {35360368}, issn = {2673-253X}, abstract = {While electronic health records (EHRs) have been shown to be effective in improving patient care in low-resource settings, there are still barriers to implementing them, including adaptability, usability, and sustainability. Taking a user-centered design approach, we developed the Hikma Health EHR for low-resource clinics caring for displaced populations. This EHR was built using React Native and TypeScript clients that sync to a Python backend, which is deployed on Google Cloud SQL. To date, the Hikma Health EHR has been deployed for 26,000 patients. The positive impacts of the system reported by clinician users are 3-fold: (1) improved continuity of care; (2) improved visualization of clinical data; and (3) improved efficiency, resulting in a higher volume of patients being treated. While further development is needed, our open-source model will allow any organization to modify this system to meet their clinical and administrative needs.}, } @article {pmid35353698, year = {2022}, author = {Gauthier, B and Painchaud-April, G and Le Duff, A and Belanger, P}, title = {Lightweight and Amplitude-Free Ultrasonic Imaging Using Single-Bit Digitization and Instantaneous Phase Coherence.}, journal = {IEEE transactions on ultrasonics, ferroelectrics, and frequency control}, volume = {69}, number = {5}, pages = {1763-1774}, doi = {10.1109/TUFFC.2022.3163621}, pmid = {35353698}, issn = {1525-8955}, mesh = {*Algorithms ; Signal-To-Noise Ratio ; *Ultrasonics ; Ultrasonography/methods ; }, abstract = {In the field of ultrasonic nondestructive testing (NDT), the total focusing method (TFM) and its derivatives are now commercially available on portable devices and are getting more popular within the NDT community. However, its implementation requires the collection of a very large amount of data, with the full matrix capture (FMC) as the worst case scenario. Analyzing all the data also requires significant processing power, and consequently, there is an interest in: 1) reducing the required storage capacity used by imaging algorithms, such as delay-and-sum (DAS) imaging, and 2) allowing the transmission and postprocessing of inspection data remotely. In this study, a different implementation of the TFM algorithm is used, based on the vector coherence factor (VCF) that is used as an image itself. This method, also generally known as phase coherence imaging, presents certain advantages, such as a better sensitivity to diffracting geometries, consistency of defect restitution among different views, and an amplitude-free behavior, as only the instantaneous phase of the signal is considered. Some drawbacks of this method must also be mentioned, including the fact that it poorly reproduces planar reflectors and presents a lower signal-to-noise ratio (SNR) than amplitude-based methods. However, previous studies showed that it can be used as a reliable tool for crack-like defect sizing. 
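As a rough illustration of the phase-only pixel value such methods compute, the sketch below derives instantaneous phases via the Hilbert transform and measures how well the focused unit phasors agree across the aperture; the function and the scalar coherence measure are generic simplifications, not the paper's vector coherence factor or its single-bit phase retrieval.

```python
import numpy as np
from scipy.signal import hilbert

def phase_coherence_pixel(signals, delays_samples):
    """signals: (n_elements, n_samples) array of received A-scans.
    delays_samples: per-element focusing delays (in samples) for one pixel.
    Returns a coherence value in [0, 1]: 1 = perfectly aligned phases."""
    phases = np.angle(hilbert(signals, axis=1))   # instantaneous phase
    idx = np.clip(delays_samples, 0, signals.shape[1] - 1)
    phasors = np.exp(1j * phases[np.arange(signals.shape[0]), idx])
    return np.abs(phasors.mean())                 # amplitude-free by construction

# Toy check: identical in-phase sinusoids across 8 elements give coherence ~1.
t = np.linspace(0.0, 1.0, 400)
sig = np.tile(np.sin(2.0 * np.pi * 5.0 * t), (8, 1))
print(phase_coherence_pixel(sig, np.full(8, 100)))
```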
Thus, a lightweight acquisition process is proposed through single-bit digitization of the signal, followed by a phase retrieval method based on the rising and falling edge locations, whose output feeds the phase coherence imaging algorithm. Simulated and experimental tests were first performed in this study on several side-drilled holes (SDHs) in a stainless steel block and then extended to an experimental study on angled notches in a 19.05-mm (3/4")-thick steel sample plate through multiview imaging. Results obtained using the array performance indicator (API) and the contrast-to-noise ratio (CNR) as quantitative evaluation parameters showed that the proposed lightweight acquisition process, which relies on binary signals, allows reducing the data throughput by up to a factor of 47. This throughput reduction is achieved while still presenting very similar results to phase coherence imaging based on the instantaneous phase derived from the Hilbert transform of the full waveform. In an era of increasing wireless network speed and cloud computing, these results open up interesting perspectives for reducing inspection hardware costs and for remote postprocessing.}, } @article {pmid35353508, year = {2022}, author = {Kutzner, C and Kniep, C and Cherian, A and Nordstrom, L and Grubmüller, H and de Groot, BL and Gapsys, V}, title = {GROMACS in the Cloud: A Global Supercomputer to Speed Up Alchemical Drug Design.}, journal = {Journal of chemical information and modeling}, volume = {62}, number = {7}, pages = {1691-1711}, pmid = {35353508}, issn = {1549-960X}, mesh = {Cloud Computing ; *Computers ; *Computing Methodologies ; Drug Design ; Ligands ; Molecular Dynamics Simulation ; }, abstract = {We assess costs and efficiency of state-of-the-art high-performance cloud computing and compare the results to traditional on-premises compute clusters. Our use case is atomistic simulations carried out with the GROMACS molecular dynamics (MD) toolkit with a particular focus on alchemical protein-ligand binding free energy calculations. We set up a compute cluster in the Amazon Web Services (AWS) cloud that incorporates various different instances with Intel, AMD, and ARM CPUs, some with GPU acceleration. Using representative biomolecular simulation systems, we benchmark how GROMACS performs on individual instances and across multiple instances. Thereby we assess which instances deliver the highest performance and which are the most cost-efficient ones for our use case. We find that, in terms of total costs, including hardware, personnel, room, energy, and cooling, producing MD trajectories in the cloud can be about as cost-efficient as an on-premises cluster given that optimal cloud instances are chosen. Further, we find that high-throughput ligand-screening can be accelerated dramatically by using global cloud resources. For a ligand screening study consisting of 19 872 independent simulations or ∼200 μs of combined simulation trajectory, we made use of diverse hardware available in the cloud at the time of the study. The computations scaled-up to reach peak performance using more than 4 000 instances, 140 000 cores, and 3 000 GPUs simultaneously. 
Our simulation ensemble finished in about 2 days in the cloud, while weeks would be required to complete the task on a typical on-premises cluster consisting of several hundred nodes.}, } @article {pmid35345804, year = {2022}, author = {He, J}, title = {Decision Scheduling for Cloud Computing Tasks Relying on Solving Large Linear Systems of Equations.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {3411959}, pmid = {35345804}, issn = {1687-5273}, mesh = {*Algorithms ; *Cloud Computing ; Humans ; Reproducibility of Results ; }, abstract = {With the continuous reform and innovation of Internet technology and the continuous development and progress of social economy, Big Data cloud computing technology is more and more widely used in people's work and life. Many parallel algorithms play a very important role in solving large linear equations in various applications. To this end, this article aims to propose and summarize a cloud computing task scheduling model that relies on the solution of large linear equations. The method of this paper is to study the technology of solving large-scale linear equations and propose an M-QoS-OCCSM scheduling model. The function of the experimental method is to solve the problem of efficiently executing N mutually dependent parallel tasks within limited resources, while fully satisfying users' expectations of task completion time, bandwidth rate, reliability, and cost. In this paper, the application experiment of large-scale linear equations in task scheduling is used to study task scheduling algorithms. The results show that when the task load is 10 and 20, the convergence speed of the MPQGA algorithm is 32 seconds and 95 seconds faster than that of the BGA algorithm, respectively.}, } @article {pmid35345578, year = {2023}, author = {Swain, AK and Garza, VR}, title = {Key Factors in Achieving Service Level Agreements (SLA) for Information Technology (IT) Incident Resolution.}, journal = {Information systems frontiers : a journal of research and innovation}, volume = {25}, number = {2}, pages = {819-834}, pmid = {35345578}, issn = {1387-3326}, abstract = {In this paper, we analyze the impact of various factors on meeting service level agreements (SLAs) for information technology (IT) incident resolution. Using a large IT services incident dataset, we develop and compare multiple models to predict the value of a target Boolean variable indicating whether an incident met its SLA. Logistic regression and neural network models are found to have the best performance in terms of misclassification rates and average squared error. From the best-performing models, we identify a set of key variables that influence the achievement of SLAs. Based on model insights, we provide a thorough discussion of IT process management implications. 
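As a minimal illustration of this kind of SLA-outcome modeling, the snippet below fits a logistic regression to a synthetic incident dataset and reports the misclassification rate; the three features and the data-generating process are invented for the example, not the study's variables.

```python
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split

# Synthetic incident features (e.g., priority score, reassignment count, queue wait).
rng = np.random.default_rng(0)
X = rng.normal(size=(1000, 3))
logits = X @ np.array([1.5, -2.0, -1.0]) + rng.normal(scale=0.5, size=1000)
y = (logits > 0).astype(int)  # 1 = incident met its SLA

X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
model = LogisticRegression().fit(X_train, y_train)
print("misclassification rate:", 1.0 - model.score(X_test, y_test))
print("coefficients (per-feature influence on meeting the SLA):", model.coef_.round(2))
```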
We suggest several strategies that can be adopted by incident management teams to improve the quality and effectiveness of incident management processes, and recommend avenues for future research.}, } @article {pmid35341205, year = {2022}, author = {Gunjan, VK and Vijayalata, Y and Valli, S and Kumar, S and Mohamed, MO and Saravanan, V}, title = {Machine Learning and Cloud-Based Knowledge Graphs to Recognize Suicidal Mental Tendencies.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {3604113}, pmid = {35341205}, issn = {1687-5273}, mesh = {*Cloud Computing ; Humans ; Machine Learning ; Pattern Recognition, Automated ; *Suicidal Ideation ; }, abstract = {To improve the quality of knowledge service selection in a cloud manufacturing environment, this paper proposes a cloud manufacturing knowledge service optimization decision method based on users' psychological behavior. Based on the characteristic analysis of cloud manufacturing knowledge service, the method establishes an optimal evaluation index system for cloud manufacturing knowledge service, uses rough set theory to assign initial weights to each evaluation index, and adjusts the initial weights according to the user's multiattribute preferences to ensure that the weights are allocated correctly. The system can help counselors acquire psychological knowledge in time and identify counselees with suicidal tendencies to prevent danger. This paper collected psychological information data and built a knowledge graph by creating a dictionary and generating entities and relationships. The Han language processing word segmentation tool generates keywords, and CHI (Chi-square) feature selection is used to classify the problem. This feature selection relies on a statistical hypothesis test that is valid when the test statistic is chi-square distributed under the null hypothesis; it includes the Pearson chi-square test and its variations. The Chi-square test has several benefits, including its robustness with respect to the distribution of the data, its ease of computation, the broad information gained from the test, its usability in research when statistical assumptions are not satisfied, and its adaptability to handling data from studies with two or more groups. To improve question-and-answer efficiency, the BiLSTM (bidirectional long short-term memory) model is preferred over other models for detecting suicidal tendencies. A text classifier detects dangerous user utterances, while question template matching and answer generation are performed by computing similarity scores. Finally, a system accuracy test is carried out, showing that the system can effectively answer questions related to psychological counseling. 
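The CHI feature-selection step described above can be pictured with scikit-learn's chi-square scorer on term counts; the toy utterances and labels below are invented, and the downstream BiLSTM classifier is omitted.

```python
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_selection import SelectKBest, chi2

docs = [
    "I feel hopeless and alone",
    "what time does the clinic open",
    "I cannot go on anymore",
    "how do I book a counseling session",
]
labels = [1, 0, 1, 0]  # 1 = potentially at-risk utterance (toy labels)

X = CountVectorizer().fit_transform(docs)  # chi2 requires non-negative features
selector = SelectKBest(chi2, k=5).fit(X, labels)
X_reduced = selector.transform(X)          # keep the 5 highest-scoring terms
print(X_reduced.shape)
```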
Extensive experiments reveal that the proposed method substantially outperforms other standard models in accuracy, recall, and F1 score when detecting psychological issues.}, } @article {pmid35341063, year = {2022}, author = {Grassi, L and Recchiuto, CT and Sgorbissa, A}, title = {Knowledge-Grounded Dialogue Flow Management for Social Robots and Conversational Agents.}, journal = {International journal of social robotics}, volume = {14}, number = {5}, pages = {1273-1293}, pmid = {35341063}, issn = {1875-4791}, abstract = {The article proposes a system for knowledge-based conversation designed for Social Robots and other conversational agents. The proposed system relies on an Ontology for the description of all concepts that may be relevant conversation topics, as well as their mutual relationships. The article focuses on the algorithm for Dialogue Management that selects the most appropriate conversation topic depending on the user input. Moreover, it discusses strategies to ensure a conversation flow that captures, as coherently as possible, the user intention to drive the conversation in specific directions while avoiding purely reactive responses to what the user says. To measure the quality of the conversation, the article reports the tests performed with 100 recruited participants, comparing five conversational agents: (i) an agent addressing dialogue flow management based only on the detection of keywords in the speech, (ii) an agent based both on the detection of keywords and the Content Classification feature of Google Cloud Natural Language, (iii) an agent that picks conversation topics randomly, (iv) a human pretending to be a chatbot, and (v) one of the most famous chatbots worldwide: Replika. The subjective perception of the participants is measured both with the SASSI (Subjective Assessment of Speech System Interfaces) tool, as well as with a custom survey for measuring the subjective perception of coherence.}, } @article {pmid35340260, year = {2022}, author = {Elhadad, A and Alanazi, F and Taloba, AI and Abozeid, A}, title = {Fog Computing Service in the Healthcare Monitoring System for Managing the Real-Time Notification.}, journal = {Journal of healthcare engineering}, volume = {2022}, number = {}, pages = {5337733}, pmid = {35340260}, issn = {2040-2309}, mesh = {Cloud Computing ; Computers ; Delivery of Health Care ; Humans ; *Internet of Things ; Monitoring, Physiologic ; }, abstract = {Fog computing is a computing paradigm that has been gaining ground in modern computing systems. In the healthcare industry, Internet of Things (IoT)-driven fog computing is being developed to speed up services for the general public and save lives. This new computing platform, based on the fog computing paradigm, can reduce the latency of exchanging signals with faraway servers, allowing medical services to be delivered more quickly in both spatial and temporal dimensions. Latency reduction is one of the essential qualities a computing system needs in order to support healthcare operations. Fog computing can provide lower latency than cloud computing because it relies only on low-end computers, mobile phones, and personal devices located near the user. In this paper, a new fog-computing-based framework for healthcare monitoring and real-time notification management is proposed.
The proposed system monitors the patient's body temperature, heart rate, and blood pressure using sensors embedded in a wearable device and applies machine learning algorithms to notify doctors or caregivers in real time whenever a reading deviates from its normal threshold range. Notifications can also be configured for the patients themselves, reminding them of periodic medications or the diet they should maintain. The cloud layer stores the resulting big data for future reference by hospitals and researchers.}, } @article {pmid35340246, year = {2022}, author = {Mishra, AK and Govil, MC and Pilli, ES and Bijalwan, A}, title = {Digital Forensic Investigation of Healthcare Data in Cloud Computing Environment.}, journal = {Journal of healthcare engineering}, volume = {2022}, number = {}, pages = {9709101}, pmid = {35340246}, issn = {2040-2309}, mesh = {*Cloud Computing ; Delivery of Health Care ; Humans ; *Information Storage and Retrieval ; Software ; }, abstract = {Cloud computing is widely used in various sectors such as finance, health care, and education. Factors such as cost optimization, interoperability, data analysis, and data ownership functionalities are attracting the healthcare industry to use cloud services. Security and forensic concerns arise in cloud environments because sensitive healthcare data can attract outside attackers and inside malicious events. Storage is the most used service in cloud computing environments. Data stored in iCloud (Apple Inc. Cloud Service Provider) is accessible via a Web browser, cloud client application, or mobile application. Apple Inc. provides iCloud service to synchronize data from MacBook, iPhone, iPad, etc. Core applications such as Mail, Contacts, Calendar, Photos, Notes, Reminders, and Keynote are synced with iCloud. Various operations can be performed on cloud data, including editing, deleting, uploading, and downloading data, as well as synchronizing data between devices. These operations generate log files and directories that are essential from an investigative perspective. This paper presents a taxonomy of iCloud forensic tools that provides a searchable catalog for forensic practitioners to identify the tools that meet their technical requirements. A case study involving healthcare data storage on iCloud service demonstrates that artifacts related to environmental information, browser activities (history, cookies, cache), synchronization activities, log files, directories, data content, and iCloud user activities are stored on a MacBook system. A GUI-based dashboard is developed to support iCloud forensics, specifically the collection of artifacts from a MacBook system.}, } @article {pmid35336536, year = {2022}, author = {He, X and Zhang, X and Wang, Y and Ji, H and Duan, X and Guo, F}, title = {Spatial Attention Frustum: A 3D Object Detection Method Focusing on Occluded Objects.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {6}, pages = {}, pmid = {35336536}, issn = {1424-8220}, abstract = {Achieving the accurate perception of occluded objects for autonomous vehicles is a challenging problem. Human vision can quickly locate important object regions in a complex external scene while only roughly analysing or ignoring other regions, a capability known as the visual attention mechanism. However, the perception system of autonomous vehicles cannot know which part of the point cloud is in the region of interest.
Therefore, it is meaningful to explore how to use the visual attention mechanism in the perception system of autonomous driving. In this paper, we propose the model of the spatial attention frustum to solve object occlusion in 3D object detection. The spatial attention frustum can suppress unimportant features and allocate limited neural computing resources to critical parts of the scene, thereby providing greater relevance and easier processing for higher-level perceptual reasoning tasks. To ensure that our method maintains good reasoning ability when faced with occluded objects with only a partial structure, we propose a local feature aggregation module to capture more complex local features of the point cloud. Finally, we discuss the projection constraint relationship between the 3D bounding box and the 2D bounding box and propose a joint anchor box projection loss function, which helps to improve the overall performance of our method. Results on the KITTI dataset show that our proposed method can effectively improve the detection accuracy of occluded objects. Our method achieves 89.46%, 79.91% and 75.53% detection accuracy in the easy, moderate, and hard difficulty levels of the car category, achieving a 6.97% performance improvement in the hard category, where occlusion is greatest. Our one-stage method does not rely on an additional refinement stage, yet its accuracy is comparable to that of two-stage methods.}, } @article {pmid35336483, year = {2022}, author = {Jian, MS and Pan, CJ}, title = {Blockchained Industry Information Handoff Based on Internet of Things Devices with Intelligent Customized Object Recognition.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {6}, pages = {}, pmid = {35336483}, issn = {1424-8220}, abstract = {To determine the quality and safety of each product used in manufacturing, the exchange of measured data between machines, operators, production lines, and manufacturing companies is crucial. In this study, we developed a system with customized object recognition capability for the secure blockchain-based transfer of industry information through Internet of Things (IoT) devices. In the proposed system, product history data are transferred over blockchains by means of artificial intelligence (AI)-based object recognition. Individual objects are recognized and represented using a unique number sequence for use as a private key on a blockchain. The data history can be automatically secured, and all the data are traceable and trackable. The reliability and validity of the proposed system were verified using the Jetson Nano Developer Kit. The proposed AI-based system is a low-cost embedded system. Computing resources required for blockchain computation and storage are provided through an open-source cloud computing platform. In an experiment, the proposed system achieved >99% accuracy within 1 s. Furthermore, the computational cost of the proposed system was 10% that of traditional AI systems.
The proposed device can be rapidly connected to IoT devices, requires limited manual operation, and can be adopted in manufacturing and production lines.}, } @article {pmid35336357, year = {2022}, author = {Silva, J and Pereira, P and Machado, R and Névoa, R and Melo-Pinto, P and Fernandes, D}, title = {Customizable FPGA-Based Hardware Accelerator for Standard Convolution Processes Empowered with Quantization Applied to LiDAR Data.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {6}, pages = {}, pmid = {35336357}, issn = {1424-8220}, support = {POCI-01-0247-FEDER-037902//European Structural and Investment Funds in the FEDER component, through the Operational Competitiveness and Internationalization Programme (COMPETE 2020)/ ; }, mesh = {*Algorithms ; *Computers ; }, abstract = {In recent years, research and development of deep learning solutions for object detection in driverless vehicles have increased. This application has benefited from the growing adoption of innovative perception solutions such as LiDAR sensors, currently the preferred devices for these tasks in autonomous vehicles. A broad variety of point-cloud-based models stand out for being efficient and robust at their intended tasks, but their point cloud processing times often exceed the strict limits demanded by this safety-critical application. This research work provides the design and implementation of a hardware IP optimized for computing convolutions, rectified linear units (ReLU), padding, and max pooling. The engine was designed to be configurable in feature map size, filter size, stride, number of inputs, number of filters, and the hardware resources allocated to a specific convolution. Performance results show that, by resorting to parallelism and quantization, the proposed solution reduces logical FPGA resource usage by 40 to 50% and processing time by 50% while maintaining deep learning accuracy.}, } @article {pmid35336335, year = {2022}, author = {Loseto, G and Scioscia, F and Ruta, M and Gramegna, F and Ieva, S and Fasciano, C and Bilenchi, I and Loconte, D}, title = {Osmotic Cloud-Edge Intelligence for IoT-Based Cyber-Physical Systems.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {6}, pages = {}, pmid = {35336335}, issn = {1424-8220}, mesh = {*Artificial Intelligence ; Intelligence ; Osmosis ; *Software ; }, abstract = {Artificial Intelligence (AI) in Cyber-Physical Systems allows machine learning inference on acquired data with ever greater accuracy, thanks to models trained with massive amounts of information generated by Internet of Things devices. Edge Intelligence is increasingly adopted to execute inference on data at the border of local networks, exploiting models trained in the Cloud. However, training tasks on Edge nodes are not yet supported by flexible dynamic migration between Edge and Cloud. This paper proposes a Cloud-Edge AI microservice architecture, based on Osmotic Computing principles.
Notable features include: (i) a containerized architecture enabling training and inference on the Edge, Cloud, or both, exploiting computational resources opportunistically to reach the best prediction accuracy; and (ii) microservice encapsulation of each architectural module, allowing a direct mapping with Commercial-Off-The-Shelf (COTS) components. Building on the proposed architecture, a prototype was realized with commodity hardware leveraging open-source software technologies; it was then used in a small-scale intelligent manufacturing case study, in which experiments validated the feasibility and key benefits of the approach.}, } @article {pmid35336322, year = {2022}, author = {Kim, YJ and Park, CH and Yoon, M}, title = {FILM: Filtering and Machine Learning for Malware Detection in Edge Computing.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {6}, pages = {}, pmid = {35336322}, issn = {1424-8220}, support = {2018-0-00429//Institute for Information and Communications Technology Promotion/ ; IITP-2020-0-01826//Institute for Information and Communications Technology Promotion/ ; NRF-2020R1A2C1006135//National Research Foundation of Korea/ ; }, mesh = {Humans ; *Machine Learning ; }, abstract = {Machine learning with static-analysis features extracted from malware files has been adopted to detect malware variants, which is desirable for resource-constrained edge computing and Internet-of-Things devices with sensors; however, such learned models suffer from a misclassification problem because some malicious files have almost the same static-analysis features as benign ones. In this paper, we present a new detection method for edge computing that can utilize existing machine learning models to classify a suspicious file into benign, malicious, or unpredictable categories, whereas existing models make only a binary benign-or-malicious decision. The new method can utilize any existing deep learning model developed for malware detection after appending a simple sigmoid function to the model. By interpreting the sigmoid value during the testing phase, the new method determines whether the model is confident about its prediction and accepts only high-confidence predictions, which reduces incorrect predictions on ambiguous static-analysis features. Through experiments on real malware datasets, we confirm that the new scheme significantly enhances the accuracy, precision, and recall of existing deep learning models. For example, accuracy is enhanced from 0.96 to 0.99, while the files classified as unpredictable can be entrusted to the cloud for further dynamic or human analysis.}, } @article {pmid35336277, year = {2022}, author = {Chen, L and Wei, L and Wang, Y and Wang, J and Li, W}, title = {Monitoring and Predictive Maintenance of Centrifugal Pumps Based on Smart Sensors.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {6}, pages = {}, pmid = {35336277}, issn = {1424-8220}, mesh = {Equipment Failure ; Monitoring, Physiologic ; Temperature ; *Vibration ; }, abstract = {Centrifugal pumps have a wide range of applications in industrial and municipal water affairs. During the use of centrifugal pumps, failures such as bearing wear, blade damage, impeller imbalance, shaft misalignment, cavitation, and water hammer often occur.
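The confidence-banding idea in the FILM entry above (pmid 35336322) can be sketched in a few lines; the cutoff values below are illustrative assumptions, not the paper's calibrated thresholds.

```python
# Sketch of sigmoid confidence banding: defer mid-range scores as "unpredictable".
# The cutoffs 0.2/0.8 are illustrative, not the paper's calibrated values.
def classify(sigmoid_score, low=0.2, high=0.8):
    if sigmoid_score <= low:
        return "benign"
    if sigmoid_score >= high:
        return "malicious"
    return "unpredictable"   # entrust to the cloud for dynamic or human analysis

print([classify(s) for s in (0.03, 0.55, 0.97)])
# -> ['benign', 'unpredictable', 'malicious']
```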
Using smart sensors and digital Internet of Things (IoT) systems to monitor the real-time operating status of pumps and to predict potential failures is of great importance for achieving predictive maintenance and raising the intelligence level of machine health management. Firstly, the common fault forms of centrifugal pumps and the characteristics of vibration signals when a fault occurs are introduced. Secondly, the centrifugal pump monitoring IoT system is designed; it is mainly composed of wireless sensors, wired sensors, data collectors, and cloud servers. Then, microelectromechanical system (MEMS) chips are used to design an integrated wireless vibration-temperature sensor, an integrated wired vibration-temperature sensor, and a data collector to monitor the running state of the pump. The wireless sensor communicates with the server through Narrowband Internet of Things (NB-IoT); the output of the wired sensor is connected to the data collector, which communicates with the server over 4G. Through cloud-edge collaboration, real-time monitoring of the running status of centrifugal pumps and intelligent diagnosis of centrifugal pump faults are realized. Finally, on-site testing and application verification of the system were conducted. The test results show that the designed sensors and application system exploit the failure mechanisms of centrifugal pumps to diagnose equipment faults automatically, and the diagnostic accuracy exceeds 85% when wired sensors are combined with the collector. As a low-cost and easy-to-implement solution, wireless sensors can also monitor gradual failures well. This research on the sensors and pump monitoring system provides feasible and effective means for applying health management and predictive maintenance to centrifugal pumps.}, } @article {pmid35332213, year = {2022}, author = {Moshiri, N and Fisch, KM and Birmingham, A and DeHoff, P and Yeo, GW and Jepsen, K and Laurent, LC and Knight, R}, title = {The ViReflow pipeline enables user friendly large scale viral consensus genome reconstruction.}, journal = {Scientific reports}, volume = {12}, number = {1}, pages = {5077}, pmid = {35332213}, issn = {2045-2322}, support = {75D30120C09795/CC/CDC HHS/United States ; UL1 TR001442/TR/NCATS NIH HHS/United States ; S10 OD026929/OD/NIH HHS/United States ; 2038509//National Science Foundation/ ; 2028040//National Science Foundation/ ; }, mesh = {*COVID-19/epidemiology ; Genome, Viral/genetics ; Humans ; Pandemics ; SARS-CoV-2/genetics ; *Software ; }, abstract = {Throughout the COVID-19 pandemic, massive sequencing and data sharing efforts enabled the real-time surveillance of novel SARS-CoV-2 strains throughout the world, the results of which provided public health officials with actionable information to prevent the spread of the virus. However, with great sequencing comes great computation, and while cloud computing platforms bring high-performance computing directly into the hands of all who seek it, optimal design and configuration of a cloud compute cluster requires significant system administration expertise. We developed ViReflow, a user-friendly viral consensus sequence reconstruction pipeline enabling rapid analysis of viral sequence datasets leveraging Amazon Web Services (AWS) cloud compute resources and the Reflow system. ViReflow was developed specifically in response to the COVID-19 pandemic, but it is general to any viral pathogen.
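As a toy illustration of the vibration-signal monitoring performed by the pump system above (pmid 35336277), here is a NumPy sketch computing two common condition indicators from a sensor window; the sampling rate, mock signal, and alarm threshold are assumptions, not the paper's configuration.

```python
# Toy condition-monitoring sketch: RMS level and dominant frequency of a
# vibration window, with an assumed alarm threshold. All values are mock data.
import numpy as np

fs = 10_000                                    # Hz, assumed sampling rate
t = np.arange(0, 1, 1 / fs)
x = np.sin(2 * np.pi * 50 * t) + 0.4 * np.random.randn(t.size)   # mock sensor window

rms = np.sqrt(np.mean(x ** 2))                 # overall vibration level
spectrum = np.abs(np.fft.rfft(x))
dominant_hz = np.fft.rfftfreq(x.size, 1 / fs)[spectrum.argmax()]

if rms > 1.5:                                  # assumed alarm threshold
    print(f"alarm: RMS={rms:.2f}, dominant frequency={dominant_hz:.0f} Hz")
```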
Importantly, when utilized with sufficient compute resources, ViReflow can trim, map, call variants, and call consensus sequences from amplicon sequence data from 1000 SARS-CoV-2 samples at 1000X depth in < 10 min, with no user intervention. ViReflow's simplicity, flexibility, and scalability make it an ideal tool for viral molecular epidemiological efforts.}, } @article {pmid35327820, year = {2022}, author = {Zhu, H and Xue, Q and Li, T and Xie, D}, title = {Traceable Scheme of Public Key Encryption with Equality Test.}, journal = {Entropy (Basel, Switzerland)}, volume = {24}, number = {3}, pages = {}, pmid = {35327820}, issn = {1099-4300}, abstract = {Owing to their special functionality, public key encryption with equality test (PKEwET) schemes have valuable applications in many fields, such as cloud computing services, blockchain, and the Internet of Things. The original PKEwET has no authorization function. Subsequently, many PKEwET schemes have been proposed with the ability to perform authorization in various application scenarios, but these schemes cannot trace ciphertexts. In this paper, ciphertext traceability is introduced into a PKEwET scheme: for the ciphertexts, the presented scheme supports not only the equality test but also traceability. The security of the proposed scheme is established through a game between an adversary and a simulator, and it achieves a desirable level of security: depending on the attacker's privileges, it achieves OW-CCA security against an adversary with a trapdoor and IND-CCA security against an adversary without a trapdoor. Finally, the performance of the presented scheme is discussed.}, } @article {pmid35320088, year = {2023}, author = {Fan, H and Yang, Y and Kankanhalli, M}, title = {Point Spatio-Temporal Transformer Networks for Point Cloud Video Modeling.}, journal = {IEEE transactions on pattern analysis and machine intelligence}, volume = {45}, number = {2}, pages = {2181-2192}, doi = {10.1109/TPAMI.2022.3161735}, pmid = {35320088}, issn = {1939-3539}, abstract = {Due to the inherent unorderedness and irregularity of point clouds, points emerge inconsistently across different frames in a point cloud video. To capture the dynamics in point cloud videos, point tracking and limited temporal modeling ranges are usually employed to preserve spatio-temporal structure. However, as points may flow in and out across frames, computing accurate point trajectories is extremely difficult, especially for long videos. Moreover, when points move fast, they may escape from a region even within a small temporal window. Besides, using the same temporal range for different motions may not accurately capture the temporal structure. In this paper, we propose a Point Spatio-Temporal Transformer (PST-Transformer). To preserve the spatio-temporal structure, PST-Transformer adaptively searches related or similar points across the entire video by performing self-attention on point features. Moreover, our PST-Transformer is equipped with an ability to encode spatio-temporal structure. Because point coordinates are irregular and unordered but point timestamps exhibit regularities and order, the spatio-temporal encoding is decoupled to reduce the impact of the spatial irregularity on the temporal modeling.
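As a loose illustration of what decoupling spatial from temporal encodings can look like for point cloud video features (pmid 35320088 above), here is a generic Python sketch; it is an editorial toy under stated assumptions (a sinusoidal encoding for the ordered timestamps, a random linear map standing in for a learned one), not the PST-Transformer's actual design.

```python
# Generic toy: encode irregular xyz coordinates and regular timestamps separately,
# then sum the two encodings. Not the PST-Transformer's actual design.
import numpy as np

def temporal_encoding(ts, dim):
    """Sinusoidal encoding suited to ordered, regular timestamps."""
    freqs = 1.0 / (10000 ** (np.arange(0, dim, 2) / dim))
    angles = ts[:, None] * freqs[None, :]
    return np.concatenate([np.sin(angles), np.cos(angles)], axis=1)

def spatial_encoding(xyz, w):
    """Linear map over irregular coordinates (random weights stand in for learned ones)."""
    return xyz @ w

n_frames, pts_per_frame, dim = 16, 64, 64
xyz = np.random.randn(n_frames * pts_per_frame, 3)            # unordered coordinates
ts = np.repeat(np.arange(n_frames), pts_per_frame).astype(float)
w = 0.02 * np.random.randn(3, dim)
features = spatial_encoding(xyz, w) + temporal_encoding(ts, dim)   # (1024, 64)
```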
By properly preserving and encoding spatio-temporal structure, our PST-Transformer effectively models point cloud videos and shows superior performance on 3D action recognition and 4D semantic segmentation.}, } @article {pmid35319833, year = {2022}, author = {ElZarrad, MK and Lee, AY and Purcell, R and Steele, SJ}, title = {Advancing an agile regulatory ecosystem to respond to the rapid development of innovative technologies.}, journal = {Clinical and translational science}, volume = {15}, number = {6}, pages = {1332-1339}, pmid = {35319833}, issn = {1752-8062}, mesh = {*Artificial Intelligence ; *Ecosystem ; Humans ; Policy Making ; }, abstract = {Technological advancements are dramatically changing the landscape of therapeutic development. The convergence of advances in computing power, analytical methods, artificial intelligence, novel digital health tools, and cloud-based platforms has the potential to power an exponential acceleration of evidence generation. For regulatory agencies responsible for evidence evaluation and oversight of medical products, these advances present both promises and challenges. Ultimately, realizing the translation and impact of these innovations that could potentially enhance therapeutic development and improve the health of individuals and the public will require a nimble and responsive regulatory approach. Supporting an adaptive policy-making infrastructure that is poised to address novel regulatory considerations, creating a workforce to ensure relevant expertise, and fostering more diverse collaborations with a broader group of stakeholders are steps toward the goal of modernizing the regulatory ecosystem. This article outlines approaches that can help provide the flexibility and tools needed to foster innovation, while ensuring the safety and effectiveness of medical products.}, } @article {pmid35317470, year = {2022}, author = {Pathak, S and Raj, R and Singh, K and Verma, PK and Kumar, B}, title = {Development of portable and robust cataract detection and grading system by analyzing multiple texture features for Tele-Ophthalmology.}, journal = {Multimedia tools and applications}, volume = {81}, number = {16}, pages = {23355-23371}, pmid = {35317470}, issn = {1380-7501}, abstract = {This paper presents a low-cost, robust, portable, and automated cataract detection system that can detect the presence of cataract from colored digital eye images and grade its severity. Ophthalmologists detect cataract through visual screening using ophthalmoscopes and slit lamps; conventionally, a patient has to visit an ophthalmologist for eye screening, and treatment follows. Developing countries lack proper health infrastructure and face a severe scarcity of trained medical professionals and technicians, and conditions in the rural and remote areas of developed nations are not much better. To bridge this gap between patients and available resources, the current work focuses on developing a portable, low-cost, robust cataract screening and grading system. Similar works use fundus and retinal images, which require costly imaging modules, and image-based detection algorithms built on much more complex neural network models. The current work instead benefits from advances in digital image processing techniques.
The colored eye image is first preprocessed; texture information in the form of mean intensity, uniformity, standard deviation, and randomness is then calculated and mapped to the doctor's diagnostic opinion for cataract screening of over 200 patients. For different grades of cataract severity, the edge pixel count was calculated in line with the doctor's opinion, and these data were later used to compute decision thresholds with a hybrid k-means algorithm for deciding on the presence of cataract and grading its severity. A low uniformity value together with high values of the other texture parameters confirms the presence of cataract, since clouding of the eye lens produces a coarse texture that lowers the uniformity measure. A higher edge pixel count indicates an early-stage cataract, as the solidifying regions in the lens are nonuniform, whereas a lower value corresponds to a fully solidified region, that is, a mature cataract. The proposed algorithm was initially developed in MATLAB and tested on over 300 patients in an eye camp. The system has shown more than 98% accuracy in detection and grading of cataract. A cloud-based system with a 3D-printed image acquisition module was later developed, yielding an automated, portable, and efficient cataract detection system for tele-ophthalmology. The proposed system uses a simple and efficient technique that also incorporates the doctor's diagnostic opinion, giving promising results that suggest its potential in tele-ophthalmology applications for reducing the cost of delivering eye care services and extending their reach. The developed system is simple in design, easy to operate, and suitable for mass cataract screening. Because the device is non-invasive, non-mydriatic, and mountable, in-person screening is not required; social distancing norms are therefore easy to follow, making the device particularly useful in COVID-19-like situations.}, } @article {pmid35317343, year = {2022}, author = {Byeon, H}, title = {Screening dementia and predicting high dementia risk groups using machine learning.}, journal = {World journal of psychiatry}, volume = {12}, number = {2}, pages = {204-211}, pmid = {35317343}, issn = {2220-3206}, abstract = {New technologies such as artificial intelligence, the Internet of Things, big data, and cloud computing have transformed society and the economy, and the medical field in particular has sought to combine traditional examination methods with these new technologies. One of the most remarkable directions in medical research is the prediction of high-dementia-risk groups using big data and artificial intelligence. This review introduces: (1) the definition, main concepts, and classification of machine learning, and how it differs overall from traditional statistical analysis models; and (2) the latest mental science studies on detecting dementia and predicting high-risk groups, in order to assist researchers pursuing medical artificial intelligence in psychiatry. A review of four studies that used machine learning to discriminate high-dementia-risk groups found that various machine learning algorithms, such as boosting models, artificial neural networks, and random forests, were used for predicting dementia.
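The first-order texture measures named in the cataract entry above (pmid 35317470) can be computed directly from an image histogram; the following NumPy sketch uses a synthetic image and standard textbook definitions, which may differ in detail from the paper's implementation.

```python
# First-order texture measures from an image histogram (textbook definitions);
# the image is synthetic, not clinical data.
import numpy as np

img = np.random.randint(0, 256, (128, 128))           # stand-in for an eye image
hist, _ = np.histogram(img, bins=256, range=(0, 256))
p = hist / hist.sum()                                 # gray-level probabilities

mean_intensity = img.mean()
std_dev = img.std()
uniformity = np.sum(p ** 2)                           # higher for smooth textures
randomness = -np.sum(p[p > 0] * np.log2(p[p > 0]))    # entropy

# Per the abstract: low uniformity plus high values of the other measures
# indicate the coarse texture produced by clouding of the lens.
print(mean_intensity, std_dev, uniformity, randomness)
```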
In the future, applying advanced machine learning algorithms to detect high-dementia-risk groups is expected to change primary care.}, } @article {pmid35311003, year = {2022}, author = {Wu, J and Turner, N and Bae, JA and Vishwanathan, A and Seung, HS}, title = {RealNeuralNetworks.jl: An Integrated Julia Package for Skeletonization, Morphological Analysis, and Synaptic Connectivity Analysis of Terabyte-Scale 3D Neural Segmentations.}, journal = {Frontiers in neuroinformatics}, volume = {16}, number = {}, pages = {828169}, pmid = {35311003}, issn = {1662-5196}, abstract = {Benefiting from the rapid development of electron microscopy imaging and deep learning technologies, an increasing number of brain image datasets with segmentation and synapse detection are published. Most of the automated segmentation methods label voxels rather than producing neuron skeletons directly. A further skeletonization step is necessary for quantitative morphological analysis. Currently, several tools are published for skeletonization as well as morphological and synaptic connectivity analysis using different computer languages and environments. Recently, the Julia programming language, notable for elegant syntax and high performance, has gained rapid adoption in the scientific computing community. Here, we present a Julia package, called RealNeuralNetworks.jl, for efficient sparse skeletonization, morphological analysis, and synaptic connectivity analysis. Based on a large-scale Zebrafish segmentation dataset, we illustrate the software features by performing distributed skeletonization in Google Cloud, clustering the neurons using the NBLAST algorithm, and combining morphological similarity and synaptic connectivity to study their relationship. We demonstrate that RealNeuralNetworks.jl is suitable for use in terabyte-scale electron microscopy image segmentation datasets.}, } @article {pmid35310887, year = {2022}, author = {Kumar, A}, title = {A cloud-based buyer-seller watermarking protocol (CB-BSWP) using semi-trusted third party for copy deterrence and privacy preserving.}, journal = {Multimedia tools and applications}, volume = {81}, number = {15}, pages = {21417-21448}, pmid = {35310887}, issn = {1380-7501}, abstract = {Nowadays, cloud computing provides a platform infrastructure for handling digital data securely, but privacy and copy control are two important issues over a network. Cloud data are available to end users and require strong security and privacy techniques for protection. Moreover, encryption-based access control mechanisms protect the digital rights of participants in a transaction, but they do not protect the media from being illegally redistributed, nor do they restrain an authorized user from revealing their secret information; the desired property is "you can access, but you cannot leak". This creates a need for copy deterrence and privacy preservation for digital media on the internet. To address this, we propose a cloud-based buyer-seller watermarking protocol (CB-BSWP) that uses a semi-trusted third party for copy deterrence and privacy preservation in the cloud environment.
The suggested scheme uses: 1) a privacy homomorphism cryptosystem with the Diffie-Hellman key exchange algorithm to provide an encrypted domain for the secure exchange of digital media; 2) robust and fair watermarking techniques to ensure high imperceptibility and robustness of the watermarked images against attacks; 3) two cloud services, Infrastructure as a Service (IaaS) to supply virtualized computing infrastructure and Watermarking as a Service (WaaS) to execute the watermarking process quickly, supported by a watermark generation and signing phase (WGSP) and a watermark extraction and verifying phase (described in the paper's fourth section); and 4) a cloud service provider (CSP), treated as a "semi-trusted" third party, to reduce the burden on the trusted third party (TTP) server and to store the encrypted digital media in cloud databases, freeing the content owner from maintaining a separate storage infrastructure. The proposed scheme secures the digital content using the SHA-512 algorithm with a 512-bit output, ensuring that computational time is not adversely affected during processing. The suggested scheme addresses the problems of piracy tracing, anonymity, tamper resistance, non-framing, and customer rights. The role of the cloud is crucial because it reduces communication overhead, provides ample storage, supports the watermarking process, and offers a solution for the secure end-to-end distribution of digital content over the cloud. To check the performance of the suggested CB-BSWP protocol against common image processing attacks, we conducted experiments in which the watermarked digital media retained high perceptual quality, resulting in a robust watermark.}, } @article {pmid35310585, year = {2022}, author = {Guo, Y}, title = {Contextualized Design of IoT (Internet of Things) Finance for Edge Artificial Intelligence Computing.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {6046957}, pmid = {35310585}, issn = {1687-5273}, mesh = {*Artificial Intelligence ; China ; Cloud Computing ; *Internet of Things ; }, abstract = {With the widespread application of IoT technology around the world, the new industry of IoT finance has emerged. Under this new business model, commercial banks and other financial institutions can realize safer and more convenient financial services such as payment, financing, and asset management through the application of IoT technology and communication network technology. In the cloud computing model, local IoT terminal devices transmit collected data over the network to a cloud server, which performs the computation. The cloud computing model compensates well for the limited performance of IoT devices, but as the number of IoT terminals and network-connected devices grows, it is constrained by network bandwidth and performance bottlenecks, leading to problems such as high latency, poor real-time behavior, and weak security.
In this paper, focusing on the rapidly developing IoT finance industry, we construct a peaks-over-threshold (POT) model to empirically analyze the operational risk of commercial banks using their risk loss data, estimate the corresponding expected shortfall (ES) values with the control variable method to measure the operational risk of traditional commercial banking and IoT finance respectively, and compare the total ES values of the two. The control variable method is used to reduce the frequency of each type of operational risk loss event for commercial banks in China.}, } @article {pmid35310579, year = {2022}, author = {Shi, F and Lin, J}, title = {Virtual Machine Resource Allocation Optimization in Cloud Computing Based on Multiobjective Genetic Algorithm.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {7873131}, pmid = {35310579}, issn = {1687-5273}, mesh = {*Algorithms ; *Cloud Computing ; Computer Simulation ; Resource Allocation ; }, abstract = {Cloud computing is an important milestone in the development of distributed computing as a commercial implementation, and it has good prospects. Infrastructure as a service (IaaS) is an important service mode in cloud computing. It combines massive resources scattered in different spaces into a unified resource pool by means of virtualization technology, facilitating the unified management and use of resources. In IaaS mode, all resources are provided in the form of virtual machines (VM). To achieve efficient resource utilization, reduce users' costs, and save users' computing time, VM allocation must be optimized. This paper proposes a new multiobjective dynamic resource allocation method aimed at the stability of multi-virtual-machine placement. Combining the current state and predicted future load of each application, the method comprehensively considers the cost of virtual machine relocation and the stability of the new placement. A multiobjective optimization genetic algorithm (MOGANS) was designed to solve the problem. Simulation results show that, compared with a genetic algorithm (GA-NN) targeting energy saving and multi-virtual-machine redistribution overhead, the placement obtained by MOGANS remains stable for longer. To address the remaining shortcomings, the paper further proposes a MOEA/D-based multiobjective dynamic resource allocation method (MOGA-C) for virtual machine placement; experimental simulation shows that MOGA-C converges faster and obtains similar multiobjective optimization results at the same computational scale.}, } @article {pmid35300555, year = {2023}, author = {Ahouanmenou, S and Van Looy, A and Poels, G}, title = {Information security and privacy in hospitals: a literature mapping and review of research gaps.}, journal = {Informatics for health & social care}, volume = {48}, number = {1}, pages = {30-46}, doi = {10.1080/17538157.2022.2049274}, pmid = {35300555}, issn = {1753-8165}, mesh = {Humans ; *Privacy ; *Evidence Gaps ; Hospitals ; Computer Security ; Cloud Computing ; }, abstract = {Information security and privacy are matters of concern in every industry. The healthcare sector has lagged in terms of implementing cybersecurity measures. Therefore, hospitals are more exposed to cyber events due to the criticality of patient data.
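For readers unfamiliar with the peaks-over-threshold machinery used in the IoT finance entry above (pmid 35310585), the following is a hedged sketch of a generic POT workflow with SciPy; the loss data, threshold choice, and ES estimator are illustrative assumptions, and the paper's own model details differ.

```python
# Generic peaks-over-threshold sketch with SciPy: fit a generalized Pareto
# distribution (GPD) to threshold exceedances, read off a high quantile (VaR),
# and estimate expected shortfall (ES) empirically. All inputs are synthetic.
import numpy as np
from scipy.stats import genpareto

losses = np.random.lognormal(mean=1.0, sigma=1.2, size=5000)   # mock loss data
u = np.quantile(losses, 0.95)                  # threshold: top 5% are "peaks"
exceedances = losses[losses > u] - u
shape, _, scale = genpareto.fit(exceedances, floc=0)

p = 0.99
q = 1 - (1 - p) * losses.size / exceedances.size   # conditional quantile level
var_p = u + genpareto.ppf(q, shape, loc=0, scale=scale)
es_p = losses[losses > var_p].mean()           # empirical expected shortfall
print(f"VaR_99={var_p:.2f}, ES_99={es_p:.2f}")
```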
Currently, little is known about state-of-the-art research on information security and privacy in hospitals. The purpose of this study is to report the outcome of a systematic literature review on research about the application of information security and privacy in hospitals. A systematic literature review following the PRISMA methodology was conducted. To reference our sample according to cybersecurity domains, we benchmarked each article against two cybersecurity frameworks: ISO 27001 Annex A and the NIST framework core. Few articles in our sample referred to the policies and compliance sections of ISO 27001. In addition, most of our sample is classified by the NIST function "Protect," meaning activities related to identity management, access control and data security. Furthermore, we have identified key domains where research in security and privacy is critical, such as big data, IoT, cloud computing, and standards and regulations. The results indicate that although cybersecurity is a growing concern in hospitals, research is still weak in some areas. Considering the resurgence of cyber-attacks in the healthcare sector, we call for more research in hospitals in the managerial and non-technical domains of information security and privacy that our analysis identified as under-researched.}, } @article {pmid35298506, year = {2022}, author = {Witt, C and Davis, RJ and Yang, Z and Ganey, JL and Gutiérrez, RJ and Healey, S and Hedwall, S and Hoagland, S and Maes, R and Malcolm, K and Sanderlin, J and Seamans, M and Jones, GM}, title = {Linking robust spatiotemporal datasets to assess and monitor habitat attributes of a threatened species.}, journal = {PloS one}, volume = {17}, number = {3}, pages = {e0265175}, pmid = {35298506}, issn = {1932-6203}, mesh = {Animals ; Conservation of Natural Resources/methods ; Ecosystem ; *Endangered Species ; Forests ; *Strigiformes ; }, abstract = {Accessibility of multispectral, multitemporal imagery combined with recent advances in cloud computing and machine learning approaches has enhanced our ability to model habitat characteristics across broad spatial and temporal scales. We integrated a large dataset of known nest and roost sites of a threatened species, the Mexican spotted owl (Strix occidentalis lucida), in the southwestern USA with Landsat imagery processed using the Continuous Change Detection and Classification (CCDC) time series algorithm on Google Earth Engine. We then used maximum entropy modeling (Maxent) to classify the landscape into four 'spectral similarity' classes that reflected the degree to which 30-m pixels contained a multispectral signature similar to that found at known owl nest/roost sites and mapped spectral similarity classes from 1986-2020. For map interpretation, we used nationally consistent forest inventory data to evaluate the structural and compositional characteristics of each spectral similarity class. We found a monotonic increase of structural characteristics typically associated with owl nesting and roosting over classes of increasing similarity, with the 'very similar' class meeting or exceeding published minimum desired management conditions for owl nesting and roosting. We also found an increased rate of loss of forest vegetation typical of owl nesting and roosting since the beginning of the 21st century that can be partly attributed to increased frequency and extent of large (≥400 ha) wildfires.
This loss resulted in a 38% reduction over the 35-year study period in forest vegetation most similar to that used for owl nesting and roosting. Our modelling approach using cloud computing with time series of Landsat imagery provided a cost-effective tool for landscape-scale, multidecadal monitoring of vegetative components of a threatened species' habitat. Our approach could be used to monitor trends in the vegetation favored by any other species, provided that high-quality location data such as we presented here are available.}, } @article {pmid35289370, year = {2022}, author = {Knosp, BM and Craven, CK and Dorr, DA and Bernstam, EV and Campion, TR}, title = {Understanding enterprise data warehouses to support clinical and translational research: enterprise information technology relationships, data governance, workforce, and cloud computing.}, journal = {Journal of the American Medical Informatics Association : JAMIA}, volume = {29}, number = {4}, pages = {671-676}, pmid = {35289370}, issn = {1527-974X}, support = {UL1 TR002537/TR/NCATS NIH HHS/United States ; UL1 TR003167/TR/NCATS NIH HHS/United States ; UL1 TR001433/TR/NCATS NIH HHS/United States ; UL1TR002384/TR/NCATS NIH HHS/United States ; UL1 TR002369/TR/NCATS NIH HHS/United States ; U24 TR002260/TR/NCATS NIH HHS/United States ; }, mesh = {Cloud Computing ; *Data Warehousing ; Humans ; Information Technology ; *Translational Research, Biomedical ; Workforce ; }, abstract = {OBJECTIVE: Among National Institutes of Health Clinical and Translational Science Award (CTSA) hubs, effective approaches for enterprise data warehouses for research (EDW4R) development, maintenance, and sustainability remain unclear. The goal of this qualitative study was to understand CTSA EDW4R operations within the broader contexts of academic medical centers and technology.

MATERIALS AND METHODS: We performed a directed content analysis of transcripts generated from semistructured interviews with informatics leaders from 20 CTSA hubs.

RESULTS: Respondents referred to services provided by health system, university, and medical school information technology (IT) organizations as "enterprise information technology (IT)." Seventy-five percent of respondents stated that the team providing EDW4R service at their hub was separate from enterprise IT; strong relationships between EDW4R teams and enterprise IT were critical for success. Managing challenges of EDW4R staffing was made easier by executive leadership support. Data governance appeared to be a work in progress, as most hubs reported complex and incomplete processes, especially for commercial data sharing. Although nearly all hubs (n = 16) described use of cloud computing for specific projects, only 2 hubs reported using a cloud-based EDW4R. Respondents described EDW4R cloud migration facilitators, barriers, and opportunities.

DISCUSSION: Descriptions of approaches to how EDW4R teams at CTSA hubs work with enterprise IT organizations, manage workforces, make decisions about data, and approach cloud computing provide insights for institutions seeking to leverage patient data for research.

CONCLUSION: Identification of EDW4R best practices is challenging, and this study helps identify a breadth of viable options for CTSA hubs to consider when implementing EDW4R services.}, } @article {pmid35289369, year = {2022}, author = {Barnes, C and Bajracharya, B and Cannalte, M and Gowani, Z and Haley, W and Kass-Hout, T and Hernandez, K and Ingram, M and Juvvala, HP and Kuffel, G and Martinov, P and Maxwell, JM and McCann, J and Malhotra, A and Metoki-Shlubsky, N and Meyer, C and Paredes, A and Qureshi, J and Ritter, X and Schumm, P and Shao, M and Sheth, U and Simmons, T and VanTol, A and Zhang, Z and Grossman, RL}, title = {The Biomedical Research Hub: a federated platform for patient research data.}, journal = {Journal of the American Medical Informatics Association : JAMIA}, volume = {29}, number = {4}, pages = {619-625}, pmid = {35289369}, issn = {1527-974X}, support = {U2CHL138346/NH/NIH HHS/United States ; /HL/NHLBI NIH HHS/United States ; }, mesh = {*Biomedical Research ; *Cloud Computing ; Humans ; Software ; }, abstract = {OBJECTIVE: The objective was to develop and operate a cloud-based federated system for managing, analyzing, and sharing patient data for research purposes, while allowing each resource sharing patient data to operate their component based upon their own governance rules. The federated system is called the Biomedical Research Hub (BRH).

MATERIALS AND METHODS: The BRH is a cloud-based federated system built over a core set of software services called framework services. BRH framework services include authentication and authorization, services for generating and assessing findable, accessible, interoperable, and reusable (FAIR) data, and services for importing and exporting bulk clinical data. The BRH includes data resources providing data operated by different entities and workspaces that can access and analyze data from one or more of the data resources in the BRH.

RESULTS: The BRH contains multiple data commons that in aggregate provide access to over 6 PB of research data from over 400 000 research participants.

DISCUSSION AND CONCLUSION: With the growing acceptance of using public cloud computing platforms for biomedical research, and the growing use of opaque persistent digital identifiers for datasets, data objects, and other entities, there is now a foundation for systems that federate data from multiple independently operated data resources that expose FAIR application programming interfaces, each using a separate data model. Applications can be built that access data from one or more of the data resources.}, } @article {pmid35286502, year = {2022}, author = {Bhattacharya, S and Ghosh, S and Bhattacharyya, S}, title = {Analytical hierarchy process tool in Google Earth Engine platform: a case study of a tropical landfill site suitability.}, journal = {Environmental monitoring and assessment}, volume = {194}, number = {4}, pages = {276}, pmid = {35286502}, issn = {1573-2959}, mesh = {Analytic Hierarchy Process ; Environmental Monitoring/methods ; Geographic Information Systems ; *Refuse Disposal/methods ; Search Engine ; Waste Disposal Facilities ; }, abstract = {Kolkata, a metropolitan city in India, has its main municipal solid waste dumpsite at Dhapa, just adjacent to the East Kolkata Wetlands (a Ramsar site). The prevailing practice at Dhapa is open dumping, which leads to various forms of contamination and hazard and creates the need to identify, using scientific methods, alternative sites to which the landfilling operation can be shifted. A user interface (UI)-based analytical hierarchy process (AHP) tool has been developed within the Google Earth Engine (GEE) cloud platform to identify alternative dumping sites using geospatial layers. An AHP function is not available as a native GEE algorithm, nor had one previously been developed by researchers for GEE. The tool has three major functions: the first handles the UI elements, the second implements the AHP procedure, and the third applies the AHP coefficients to the layers to generate the final suitability layer. Users can also upload a comparison matrix as a GEE asset in CSV form, which is automatically fed into the AHP to calculate the coefficients and consistency ratio and generate the spatial suitability layers. This approach showcases, for the first time, a generalized AHP function within the GEE environment. The tool is built on the cloud platform and is dynamic, robust, and suitable for a variety of AHP-based suitability analyses in environmental monitoring and assessment.}, } @article {pmid35284203, year = {2022}, author = {Chandra, M and Kumar, K and Thakur, P and Chattopadhyaya, S and Alam, F and Kumar, S}, title = {Digital technologies, healthcare and Covid-19: insights from developing and emerging nations.}, journal = {Health and technology}, volume = {12}, number = {2}, pages = {547-568}, pmid = {35284203}, issn = {2190-7188}, abstract = {The COVID-19 pandemic created a global health crisis affecting every nation. Essential smart medical devices/accessories, quarantine facilities, surveillance systems, and related digital technologies are in huge demand. Healthcare, manufacturing industries, and educational institutions need technologies that allow working from a safe location. Digital technologies and Industry 4.0 tools have the potential to fulfil these customized requirements during and after the COVID-19 crisis.
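Relating to the AHP tool entry above (pmid 35286502), here is a minimal NumPy sketch of the core AHP computation such a tool performs: deriving priority weights from a pairwise comparison matrix and checking the consistency ratio. The example matrix is invented, not the study's criteria.

```python
# Minimal AHP: priority weights from a pairwise comparison matrix via the
# principal eigenvector, plus the consistency ratio. Example matrix is invented.
import numpy as np

A = np.array([[1.0, 3.0, 5.0],
              [1/3, 1.0, 2.0],
              [1/5, 1/2, 1.0]])                  # pairwise comparisons of 3 criteria

eigvals, eigvecs = np.linalg.eig(A)
k = eigvals.real.argmax()                        # principal (Perron) eigenvalue
weights = np.abs(eigvecs[:, k].real)
weights /= weights.sum()                         # normalized priority vector

n = A.shape[0]
ci = (eigvals.real[k] - n) / (n - 1)             # consistency index
ri = {1: 0.0, 2: 0.0, 3: 0.58, 4: 0.90, 5: 1.12}[n]   # Saaty's random index
cr = ci / ri                                     # conventionally accepted if CR < 0.1
print(weights, cr)
```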
The purpose of this research is to help healthcare professionals, government policymakers, researchers, industry professionals, academics, and students/learners understand the paradigm of different digital technologies and Industry 4.0 tools and their applications during the COVID-19 pandemic. Digital technologies and Industry 4.0 tools, along with their current and potential applications, are reviewed, and their uses are identified. Digital technologies and Industry 4.0 tools (3D printing, artificial intelligence, cloud computing, autonomous robots, biosensors, telemedicine services, the Internet of Things (IoT), virtual reality, and holography) offer opportunities for the effective delivery of healthcare services, online education, and work-from-home (WFH) environments. The article emphasises the usefulness, latest developments, and implementation of digital technologies and Industry 4.0 techniques and tools in fighting the COVID-19 pandemic worldwide.}, } @article {pmid35281749, year = {2022}, author = {Yulianto, F and Kushardono, D and Budhiman, S and Nugroho, G and Chulafak, GA and Dewi, EK and Pambudi, AI}, title = {Evaluation of the Threshold for an Improved Surface Water Extraction Index Using Optical Remote Sensing Data.}, journal = {TheScientificWorldJournal}, volume = {2022}, number = {}, pages = {4894929}, pmid = {35281749}, issn = {1537-744X}, abstract = {In this study, we propose an automatic water extraction index (AWEI) threshold improvement model that can be used to detect lake surface water from optical remote sensing data. An annual Landsat 8 mosaic was created using the Google Earth Engine (GEE) platform to obtain cloud-free satellite image data. The challenge of this study was to determine the threshold value that marks the boundary between water and nonwater; the AWEI was selected to address this challenge. The AWEI approach was extended by adding a water threshold value derived from split-based approach (SBA) calculations on Landsat 8 satellite images. The SBA was used to determine local threshold variations in data scenes that were used to classify water and nonwater. The class threshold between water and nonwater in each selected subscene image was determined from class intervals generated by geostatistical analysis, initially referred to as smart quantiles. The objectives of this study were (a) to increase the accuracy of automatic lake surface water detection by improving threshold determination through SBA-based analysis and calculation, and (b) to test the improved AWEI threshold on the surface water of several lakes with heterogeneous characteristics. The results show that the threshold obtained from the smart quantile calculation with the natural breaks approach (AWEI ≥ -0.23) gave an overall accuracy close to 100%, better than the conventional threshold (AWEI ≥ 0.00) with an overall accuracy of 98%, an improvement of 2% based on the confusion matrix calculation.
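To make the thresholding in the study above (pmid 35281749) concrete, here is a small NumPy sketch that applies both thresholds to an AWEI image; the AWEI_nsh band combination shown is the standard formulation from the AWEI literature and is an assumption here, since the abstract does not restate it, and the reflectance data are mock values.

```python
# Mock application of the two AWEI thresholds compared in the study. The
# AWEI_nsh band combination is the standard formulation from the AWEI
# literature (an assumption here; the abstract does not restate it).
import numpy as np

green, nir, swir1, swir2 = np.random.rand(4, 512, 512) * 0.5   # mock reflectance
awei = 4 * (green - swir1) - (0.25 * nir + 2.75 * swir2)

water_default = awei >= 0.00    # conventional threshold (98% accuracy reported)
water_improved = awei >= -0.23  # SBA/smart-quantile threshold (near 100% reported)
```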
In addition, the overall accuracy obtained when classifying water and nonwater for the various national priority lakes in Indonesia varies from 94% to 100%.}, } @article {pmid35280732, year = {2022}, author = {Bonmatí, LM and Miguel, A and Suárez, A and Aznar, M and Beregi, JP and Fournier, L and Neri, E and Laghi, A and França, M and Sardanelli, F and Penzkofer, T and Lambin, P and Blanquer, I and Menzel, MI and Seymour, K and Figueiras, S and Krischak, K and Martínez, R and Mirsky, Y and Yang, G and Alberich-Bayarri, Á}, title = {CHAIMELEON Project: Creation of a Pan-European Repository of Health Imaging Data for the Development of AI-Powered Cancer Management Tools.}, journal = {Frontiers in oncology}, volume = {12}, number = {}, pages = {742701}, pmid = {35280732}, issn = {2234-943X}, support = {MC_PC_21013/MRC_/Medical Research Council/United Kingdom ; MR/V023799/1/MRC_/Medical Research Council/United Kingdom ; PG/16/78/32402/BHF_/British Heart Foundation/United Kingdom ; }, abstract = {The CHAIMELEON project aims to set up a pan-European repository of health imaging data, tools and methodologies, with the ambition to set a standard and provide resources for future AI experimentation for cancer management. It is a four-year, EU-funded project tackling some of the most ambitious research in the fields of biomedical imaging, artificial intelligence and cancer treatment, addressing the four types of cancer that currently have the highest prevalence worldwide: lung, breast, prostate and colorectal. To allow this, clinical partners and external collaborators will populate the repository with multimodality (MR, CT, PET/CT) imaging and related clinical data. Subsequently, AI developers will enable a multimodal analytical data engine facilitating the interpretation, extraction and exploitation of the information stored in the repository. The development and implementation of AI-powered pipelines will enable advancement towards automating data deidentification, curation, annotation, integrity securing and image harmonization. By the end of the project, the usability and performance of the repository as a tool fostering AI experimentation will be technically validated, including a validation subphase by world-class European AI developers, participating in Open Challenges to the AI Community. Upon successful validation of the repository, a set of selected AI tools will undergo early in-silico validation in observational clinical studies coordinated by leading experts in the partner hospitals. Tool performance will be assessed, including external independent validation on hallmark clinical decisions in response to some of the currently most important clinical end points in cancer.
The project brings together a consortium of 18 European partners including hospitals, universities, R&D centers and private research companies, constituting an ecosystem of infrastructures, biobanks, AI/in-silico experimentation and cloud computing technologies in oncology.}, } @article {pmid35271207, year = {2022}, author = {Tzanettis, I and Androna, CM and Zafeiropoulos, A and Fotopoulou, E and Papavassiliou, S}, title = {Data Fusion of Observability Signals for Assisting Orchestration of Distributed Applications.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {5}, pages = {}, pmid = {35271207}, issn = {1424-8220}, mesh = {*Cloud Computing ; }, abstract = {Nowadays, various frameworks are emerging for supporting distributed tracing techniques over microservices-based distributed applications. The objective is to improve observability and management of operational problems of distributed applications, considering bottlenecks in terms of high latencies in the interaction among the deployed microservices. However, such frameworks provide information that is disjoint from the management information that is usually collected by cloud computing orchestration platforms. There is a need to improve observability by combining such information to easily produce insights related to performance issues and to realize root cause analyses to tackle them. In this paper, we provide a modern observability approach and pilot implementation for tackling data fusion aspects in edge and cloud computing orchestration platforms. We consider the integration of signals made available by various open-source monitoring and observability frameworks, including metrics, logs and distributed tracing mechanisms. The approach is validated in an experimental orchestration environment based on the deployment and stress testing of a proof-of-concept microservices-based application. Useful results are produced regarding the identification of the main causes of latency in the various application parts and a better understanding of the application's behavior under different stress conditions.}, } @article {pmid35271184, year = {2022}, author = {Jassas, MS and Mahmoud, QH}, title = {Analysis of Job Failure and Prediction Model for Cloud Computing Using Machine Learning.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {5}, pages = {}, pmid = {35271184}, issn = {1424-8220}, support = {DDG2020-00032//Natural Sciences and Engineering Research Council/ ; }, mesh = {Algorithms ; Animals ; *Cloud Computing ; Horses ; Machine Learning ; Reproducibility of Results ; *Software ; }, abstract = {Modern applications, such as smart cities, home automation, and eHealth, demand a new approach to improve cloud application dependability and availability. Due to the enormous scope and diversity of the cloud environment, most cloud services, including hardware and software, have encountered failures. In this study, we first analyze and characterize the behaviour of failed and completed jobs using publicly accessible traces. We have designed and developed a failure prediction model to determine failed jobs before they occur. The proposed model aims to improve resource utilization and cloud application efficiency. We evaluate the proposed model on three publicly available traces: the Google cluster, Mustang, and Trinity. The traces were also subjected to various machine learning models to find the most accurate one.
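The kind of evaluation described here can be sketched as follows: train a classifier on trace-derived job features and report precision, recall, and F1-score. The synthetic features and the random-forest choice are assumptions for illustration; the paper's exact features and model set are not specified in this abstract.

```python
# Sketch of a job-failure prediction experiment, assuming per-job features
# such as requested CPU/memory; not the paper's exact pipeline or data.
from sklearn.datasets import make_classification
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import precision_recall_fscore_support
from sklearn.model_selection import train_test_split

# Stand-in for trace-derived features (requested resources, priority, etc.)
X, y = make_classification(n_samples=5000, n_features=8, weights=[0.8, 0.2], random_state=42)
X_tr, X_te, y_tr, y_te = train_test_split(X, y, stratify=y, random_state=42)

clf = RandomForestClassifier(n_estimators=200, random_state=42).fit(X_tr, y_tr)
prec, rec, f1, _ = precision_recall_fscore_support(y_te, clf.predict(X_te), average="binary")
print(f"precision={prec:.3f} recall={rec:.3f} F1={f1:.3f}")
```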
Our results indicate a significant correlation between unsuccessful tasks and requested resources. The evaluation results also revealed that our model has high precision, recall, and F1-score. Several solutions, such as predicting job failure, developing scheduling algorithms, changing priority policies, or limiting re-submission of tasks, can improve the reliability and availability of cloud services.}, } @article {pmid35271000, year = {2022}, author = {Janbi, N and Mehmood, R and Katib, I and Albeshri, A and Corchado, JM and Yigitcanlar, T}, title = {Imtidad: A Reference Architecture and a Case Study on Developing Distributed AI Services for Skin Disease Diagnosis over Cloud, Fog and Edge.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {5}, pages = {}, pmid = {35271000}, issn = {1424-8220}, support = {DSR Grant No. RG-10-611-38//King Abdulaziz University/ ; }, mesh = {Artificial Intelligence ; *COVID-19/diagnosis ; Humans ; SARS-CoV-2 ; *Skin Diseases/diagnosis ; Software ; }, abstract = {Several factors are motivating the development of preventive, personalized, connected, virtual, and ubiquitous healthcare services. These factors include declining public health, an increase in chronic diseases, an ageing population, rising healthcare costs, the need to bring intelligence near the user for privacy, security, performance, and cost reasons, as well as COVID-19. Motivated by these drivers, this paper proposes, implements, and evaluates a reference architecture called Imtidad that provides Distributed Artificial Intelligence (AI) as a Service (DAIaaS) over cloud, fog, and edge using a service catalog case study containing 22 AI skin disease diagnosis services. These services belong to four service classes that are distinguished based on software platforms (containerized gRPC, gRPC, Android, and Android Nearby) and are executed on a range of hardware platforms (Google Cloud, HP Pavilion Laptop, NVIDIA Jetson nano, Raspberry Pi Model B, Samsung Galaxy S9, and Samsung Galaxy Note 4) and four network types (Fiber, Cellular, Wi-Fi, and Bluetooth). The AI models for the diagnosis include two standard Deep Neural Networks and two Tiny AI deep models to enable their execution at the edge, trained and tested using 10,015 real-life dermatoscopic images. The services are evaluated using several benchmarks including model service value, response time, energy consumption, and network transfer time. A DL service on a local smartphone provides the best service in terms of both energy and speed, followed by a Raspberry Pi edge device and a laptop in fog. The services are designed to enable different use cases, such as patient diagnosis at home or sending diagnosis requests to travelling medical professionals through a fog device or cloud.
This pioneering work provides a reference architecture and a detailed implementation and treatment of DAIaaS services, and it is expected to have an extensive impact on developing smart distributed service infrastructures for healthcare and other sectors.}, } @article {pmid35270901, year = {2022}, author = {Orive, A and Agirre, A and Truong, HL and Sarachaga, I and Marcos, M}, title = {Quality of Service Aware Orchestration for Cloud-Edge Continuum Applications.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {5}, pages = {}, pmid = {35270901}, issn = {1424-8220}, support = {825473//European Commission/ ; RTI2018-096116-B-I00//Spanish Ministry of Science, Innovation and Universities/ ; KK-2020/00042//Basque Government/ ; IT1324-19//Basque Government/ ; }, mesh = {*Algorithms ; *Cloud Computing ; }, abstract = {The fast growth in the amount of connected devices with computing capabilities in the past years has enabled the emergence of a new computing layer at the Edge. Despite being resource-constrained if compared with cloud servers, they offer lower latencies than those achievable by Cloud computing. The combination of both Cloud and Edge computing paradigms can provide a suitable infrastructure for meeting complex applications' quality of service requirements that cannot easily be achieved with either of these paradigms alone. These requirements can be very different for each application, from achieving time sensitivity or assuring data privacy to storing and processing large amounts of data. Therefore, orchestrating these applications in the Cloud-Edge continuum raises new challenges that need to be solved in order to fully take advantage of this layered infrastructure. This paper proposes an architecture that enables the dynamic orchestration of applications in the Cloud-Edge continuum. It focuses on the application's quality of service by providing the scheduler with input that is commonly used by modern scheduling algorithms. The architecture uses a distributed scheduling approach that can be customized on a per-application basis, which ensures that it can scale properly even in setups with a high number of nodes and complex scheduling algorithms. This architecture has been implemented on top of Kubernetes and evaluated in order to assess its viability to enable more complex scheduling algorithms that take into account the quality of service of applications.}, } @article {pmid35267078, year = {2022}, author = {Vinci-Booher, S and Caron, B and Bullock, D and James, K and Pestilli, F}, title = {Development of white matter tracts between and within the dorsal and ventral streams.}, journal = {Brain structure & function}, volume = {227}, number = {4}, pages = {1457-1477}, pmid = {35267078}, issn = {1863-2661}, support = {R01 EB029272/EB/NIBIB NIH HHS/United States ; }, mesh = {Adult ; Child ; Child, Preschool ; Diffusion Tensor Imaging ; Humans ; Learning ; *White Matter/diagnostic imaging ; }, abstract = {The degree of interaction between the ventral and dorsal visual streams has been discussed in multiple scientific domains for decades. Recently, several white matter tracts that directly connect cortical regions associated with the dorsal and ventral streams have become possible to study due to advancements in automated and reproducible methods. The developmental trajectory of this set of tracts, here referred to as the posterior vertical pathway (PVP), has yet to be described.
We propose an input-driven model of white matter development and provide evidence for the model by focusing on the development of the PVP. We used reproducible, cloud-computing methods and diffusion imaging from adults and children (ages 5-8 years) to compare PVP development to that of tracts within the ventral and dorsal pathways. PVP microstructure was more adult-like than dorsal stream microstructure, but less adult-like than ventral stream microstructure. Additionally, PVP microstructure was more similar to the microstructure of the ventral than the dorsal stream and was predicted by performance on a perceptual task in children. Overall, results suggest a potential role for the PVP in the development of the dorsal visual stream that may be related to its ability to facilitate interactions between ventral and dorsal streams during learning. Our results are consistent with the proposed model, suggesting that the microstructural development of major white matter pathways is related, at least in part, to the propagation of sensory information within the visual system.}, } @article {pmid35259122, year = {2022}, author = {Chen, Y and Mao, Q and Wang, B and Duan, P and Zhang, B and Hong, Z}, title = {Privacy-Preserving Multi-Class Support Vector Machine Model on Medical Diagnosis.}, journal = {IEEE journal of biomedical and health informatics}, volume = {26}, number = {7}, pages = {3342-3353}, doi = {10.1109/JBHI.2022.3157592}, pmid = {35259122}, issn = {2168-2208}, mesh = {Algorithms ; Cloud Computing ; Computer Security ; Confidentiality ; Humans ; *Privacy ; *Support Vector Machine ; }, abstract = {With the rapid development of machine learning in the medical cloud system, cloud-assisted medical computing provides a concrete platform for remote, rapid medical diagnosis services. Support vector machine (SVM), as one of the important algorithms of machine learning, has been widely used in the field of medical diagnosis for its high classification accuracy and efficiency. In some existing schemes, healthcare providers train diagnostic models with SVM algorithms and provide online diagnostic services to doctors. Doctors send the patient's case report to the diagnostic models to obtain the results and assist in clinical diagnosis. However, case reports involve patients' privacy, and patients do not want their sensitive information to be leaked. Therefore, the protection of patients' privacy has become an important research direction in the field of online medical diagnosis. In this paper, we propose a privacy-preserving medical diagnosis scheme based on multi-class SVMs. The scheme is based on the distributed two trapdoors public key cryptosystem (DT-PKC) and Boneh-Goh-Nissim (BGN) cryptosystem. We design a secure computing protocol to compute the core process of the SVM classification algorithm. Our scheme can deal with both linearly separable data and nonlinear data while protecting the privacy of user data and support vectors.
The results show that our scheme is secure, reliable, and scalable, with high accuracy.}, } @article {pmid35256689, year = {2022}, author = {Gaikwad, A and Shende, K and Arvind, and Dorai, K}, title = {Implementing efficient selective quantum process tomography of superconducting quantum gates on IBM quantum experience.}, journal = {Scientific reports}, volume = {12}, number = {1}, pages = {3688}, pmid = {35256689}, issn = {2045-2322}, support = {PMRF Fellowship//Ministry of Education, India/ ; DST/ICPS/QuST/Theme-1/2019/General Project number Q-68.//Department of Science and Technology, Ministry of Science and Technology, India/ ; DST/ICPS/QuST/Theme-1/2019/General Project number Q-74.//Department of Science and Technology, Ministry of Science and Technology, India/ ; }, abstract = {The experimental implementation of selective quantum process tomography (SQPT) involves computing individual elements of the process matrix with the help of a special set of states called quantum 2-design states. However, the number of experimental settings required to prepare input states from quantum 2-design states to selectively and precisely compute a desired element of the process matrix is still high, and hence constructing the corresponding unitary operations in the lab is a daunting task. In order to reduce the experimental complexity, we mathematically reformulated the standard SQPT problem, which we term the modified SQPT (MSQPT) method. We designed the generalized quantum circuit to prepare the required set of input states and formulated an efficient measurement strategy aimed at minimizing the experimental cost of SQPT. We experimentally demonstrated the MSQPT protocol on the IBM QX2 cloud quantum processor and selectively characterized various two- and three-qubit quantum gates.}, } @article {pmid35251564, year = {2022}, author = {Kamruzzaman, MM and Alrashdi, I and Alqazzaz, A}, title = {New Opportunities, Challenges, and Applications of Edge-AI for Connected Healthcare in Internet of Medical Things for Smart Cities.}, journal = {Journal of healthcare engineering}, volume = {2022}, number = {}, pages = {2950699}, pmid = {35251564}, issn = {2040-2309}, mesh = {*Artificial Intelligence ; Cities ; Cloud Computing ; Delivery of Health Care ; Humans ; *Internet of Things ; }, abstract = {A revolution in healthcare can be experienced with the advancement of smart sensorial things, Artificial Intelligence (AI), Machine Learning (ML), Deep Learning (DL), Internet of Medical Things (IoMT), and edge analytics with the integration of cloud computing. Connected healthcare is receiving extraordinary attention from industry, government, and the healthcare communities. In this study, several studies published in the last 6 years, from 2016 to 2021, were selected. The selection process is represented through the PRISMA flow chart. It has been identified that the increasing challenges of healthcare can be overcome by the application of AI, ML, DL, Edge AI, IoMT, 6G, and cloud computing. Still, only limited areas have implemented these latest advancements, and they have experienced improved outcomes. These applications have shown successful results not only in resolving issues from the perspective of the patient but also from the perspective of healthcare professionals.
It is recommended that the models proposed in these studies be further validated and implemented in other domains, to confirm their effectiveness and to ensure that they can be deployed effectively across regions.}, } @article {pmid35247967, year = {2022}, author = {Kuśmirek, W and Nowak, R}, title = {CNVind: an open source cloud-based pipeline for rare CNVs detection in whole exome sequencing data based on the depth of coverage.}, journal = {BMC bioinformatics}, volume = {23}, number = {1}, pages = {85}, pmid = {35247967}, issn = {1471-2105}, support = {2019/35/N/ST6/01983//Polish National Science Center/ ; }, mesh = {Algorithms ; Cloud Computing ; *DNA Copy Number Variations ; *Exome ; High-Throughput Nucleotide Sequencing/methods ; Exome Sequencing ; }, abstract = {BACKGROUND: A typical Copy Number Variations (CNVs) detection process based on the depth of coverage in the Whole Exome Sequencing (WES) data consists of several steps: (I) calculating the depth of coverage in sequencing regions, (II) quality control, (III) normalizing the depth of coverage, (IV) calling CNVs. Previous tools performed one normalization process for each chromosome: all the coverage depths in the sequencing regions from a given chromosome were normalized in a single run.

METHODS: Herein, we present the new CNVind tool for calling CNVs, where the normalization process is conducted separately for each of the sequencing regions. The total number of normalizations is equal to the number of sequencing regions in the investigated dataset. For example, when analyzing a dataset composed of n sequencing regions, CNVind performs n independent depth of coverage normalizations. Before each normalization, the application selects the k most correlated sequencing regions, using Pearson's correlation of the depth of coverage as the distance metric. Then the resulting subgroup of [Formula: see text] sequencing regions is normalized and the results of all n independent normalizations are combined; finally, the segmentation and CNV calling process is performed on the resultant dataset.
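A minimal sketch of the per-region reference selection described above, assuming a regions-by-samples depth-of-coverage matrix; the median-ratio normalization at the end is a simplification for illustration, not CNVind's actual normalization step.

```python
# For one target region, pick the k regions whose coverage profiles are most
# correlated (Pearson) across samples, then normalize against that reference set.
import numpy as np

def k_most_correlated(coverage, target_idx, k):
    """coverage: (n_regions, n_samples) depth-of-coverage matrix."""
    corr = np.corrcoef(coverage)[target_idx]   # Pearson r of target vs. all regions
    corr[target_idx] = -np.inf                 # exclude the region itself
    return np.argsort(corr)[-k:]               # indices of the k most correlated regions

rng = np.random.default_rng(1)
cov = rng.poisson(100, size=(50, 30)).astype(float)   # toy WES coverage
ref = k_most_correlated(cov, target_idx=0, k=5)
# Simplified normalization: divide the target by the median of its reference set
normalized = cov[0] / np.median(cov[ref], axis=0)
print(normalized.round(2))
```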

RESULTS AND CONCLUSIONS: We used WES data from the 1000 Genomes project to evaluate the impact of independent normalization on CNV calling performance and compared the results with state-of-the-art tools: CODEX and exomeCopy. The results showed that independent normalization significantly improves the specificity of rare CNV detection. For example, for the investigated dataset, we reduced the number of FP calls from over 15,000 to around 5000 while maintaining a constant number of TP calls equal to about 150 CNVs. However, independent normalization of each sequencing region is a computationally expensive process; therefore, our pipeline is customized and can easily be run in a cloud computing environment, on a computer cluster, or on a single CPU server. To our knowledge, the presented application is the first to implement this approach of independent normalization of the depth of coverage in WES data.}, } @article {pmid35240812, year = {2022}, author = {Zhao, J and Wu, D}, title = {The risk assessment on the security of industrial internet infrastructure under intelligent convergence with the case of G.E.'s intellectual transformation.}, journal = {Mathematical biosciences and engineering : MBE}, volume = {19}, number = {3}, pages = {2896-2912}, doi = {10.3934/mbe.2022133}, pmid = {35240812}, issn = {1551-0018}, mesh = {*Artificial Intelligence ; Big Data ; *Cloud Computing ; Internet ; Risk Assessment ; }, abstract = {The industrial internet depends on the development of cloud computing, artificial intelligence, and big data analysis. Intelligent fusion is dependent on the architecture and security features of the industrial internet. Firstly, the paper studies the infrastructure mode that urgently needs to be established for the industrial internet and provides a possible infrastructure mode and a related security evaluation system. Secondly, it analyses the digital transformation process with the case of G.E.'s industrial internet development practice, clarifying that G.E. is forming a new value closed-loop through mixed digital and strategy channels. Thirdly, industrial internet security research is described from multiple viewpoints, based on industrial internet applications, the architecture of the security service and security assurance defense system, and the non-user entrance probability model. Finally, the paper illustrates the changes in knowledge workflow and social collaboration caused by the industrial internet under intelligent manufacturing.}, } @article {pmid35230953, year = {2023}, author = {Wang, X and Ren, L and Yuan, R and Yang, LT and Deen, MJ}, title = {QTT-DLSTM: A Cloud-Edge-Aided Distributed LSTM for Cyber-Physical-Social Big Data.}, journal = {IEEE transactions on neural networks and learning systems}, volume = {34}, number = {10}, pages = {7286-7298}, doi = {10.1109/TNNLS.2022.3140238}, pmid = {35230953}, issn = {2162-2388}, abstract = {Cyber-physical-social systems (CPSS), an emerging cross-disciplinary research area, combine cyber-physical systems (CPS) with social networking for the purpose of providing personalized services for humans. CPSS big data, recording various aspects of human lives, should be processed to mine valuable information for CPSS services. To efficiently deal with CPSS big data, artificial intelligence (AI), an increasingly important technology, is used for CPSS data processing and analysis.
Meanwhile, the rapid development of edge devices with fast processors and large memories allows local edge computing to be a powerful real-time complement to global cloud computing. Therefore, to facilitate the processing and analysis of CPSS big data from a multi-attribute perspective, a cloud-edge-aided quantized tensor-train distributed long short-term memory (QTT-DLSTM) method is presented in this article. First, a tensor is used to represent the multi-attribute CPSS big data, which is decomposed into the QTT form to facilitate distributed training and computing. Second, a distributed cloud-edge computing model is used to systematically process the CPSS data, including global large-scale data processing in the cloud and local small-scale data processing at the edge. Third, a distributed computing strategy is used to improve the efficiency of training by partitioning the weight matrix and the large amounts of input data in the QTT form. Finally, the performance of the proposed QTT-DLSTM method is evaluated using experiments on a public discrete manufacturing process dataset, the Li-ion battery dataset, and a public social dataset.}, } @article {pmid35228830, year = {2022}, author = {Verma, A and Agarwal, G and Gupta, AK}, title = {A novel generalized fuzzy intelligence-based ant lion optimization for internet of things based disease prediction and diagnosis.}, journal = {Cluster computing}, volume = {25}, number = {5}, pages = {3283-3298}, pmid = {35228830}, issn = {1386-7857}, abstract = {In the modern healthcare system, the Internet of Things (IoT) and data mining methods, together with cloud computing, play an essential role in managing large volumes of data for predicting and diagnosing various categories of diseases. However, when a patient suffers from more than one disease, the physician may not identify them properly. Therefore, in this research, a predictive method using a cloud- and IoT-based database, with biosensors to estimate patients' parameters, is proposed for forecasting diseases. In addition, a novel Generalized Fuzzy Intelligence-based Ant Lion Optimization (GFIbALO) classifier along with a regression rule is proposed for predicting diseases accurately. Initially, the dataset is filtered and features are extracted using the regression rule; the data are then processed by the proposed GFIbALO approach to classify diseases. Moreover, if the patient is affected by any disease, a warning signal is sent to the patient via text or another channel, and the patient can get advice from doctors or other medical support. The proposed GFIbALO classifier is implemented in MATLAB.
Subsequently, the results from the presented model are compared with state-of-the-art techniques, showing that the presented method is more beneficial for diagnosis and disease forecasting.}, } @article {pmid35225946, year = {2022}, author = {Celesti, A and Cimino, V and Naro, A and Portaro, S and Fazio, M and Villari, M and Calabró, RS}, title = {Recent Considerations on Gaming Console Based Training for Multiple Sclerosis Rehabilitation.}, journal = {Medical sciences (Basel, Switzerland)}, volume = {10}, number = {1}, pages = {}, pmid = {35225946}, issn = {2076-3271}, mesh = {Humans ; *Multiple Sclerosis/therapy ; Pilot Projects ; Postural Balance/physiology ; Quality of Life ; *Video Games ; Young Adult ; }, abstract = {Multiple Sclerosis (MS) is a well-known, chronic demyelinating disease of the Central Nervous System (CNS) and one of the most common causes of disability in young adults. In this context, one of the major challenges in patients' rehabilitation is to maintain the gained motor abilities in terms of functional independence. This could be partially obtained by applying new emerging and cutting-edge virtual/augmented reality and serious game technologies for a playful, noninvasive treatment that was demonstrated to be quite efficient and effective in enhancing the clinical status of patients and their (re)integration into society. Recently, Cloud computing and the Internet of Things (IoT) have emerged as technologies that can potentially revolutionize patients' care. To achieve such a goal, a system is required that, on the one hand, gathers patients' clinical parameters through a network of medical IoT devices equipped with sensors and, on the other hand, sends the collected data to a hospital Cloud for processing and analytics. In this paper, we assess the effectiveness of a Nintendo Wii Fit® Plus Balance Board (WFBB) used as an IoT medical device adopted in a rehabilitation training program aimed at improving the physical abilities of MS patients (pwMS). In particular, the main scientific contribution of this paper is twofold: (i) to present a new preliminary pilot study investigating whether exercises based on the Nintendo Wii Fit® balance board included in a rehabilitation training program could improve the physical abilities and Quality of Life (QoL) of patients compared with a conventional four-week rehabilitation training program; (ii) to discuss how such a rehabilitation training program could be adopted in the perspective of near-future networks of medical IoT-based rehabilitation devices, interconnected with a hospital Cloud system for big data processing to improve patients' therapies and support scientific research on motor rehabilitation. Results demonstrate the advantages of our approach from both health and technological points of view.}, } @article {pmid35224941, year = {2022}, author = {Wang, H and Chen, WB and He, L and Li, HF}, title = {[Responses of aquatic vegetation coverage to interannual variations of water level in different hydrologically connected sub-lakes of Poyang Lake, China].}, journal = {Ying yong sheng tai xue bao = The journal of applied ecology}, volume = {33}, number = {1}, pages = {191-200}, doi = {10.13287/j.1001-9332.202201.013}, pmid = {35224941}, issn = {1001-9332}, mesh = {China ; *Ecosystem ; Floods ; Hydrology ; *Lakes ; Water ; }, abstract = {The variation of water level is the main environmental factor controlling the growth of aquatic vegetation.
It is important to understand its influence on aquatic vegetation coverage in sub-lakes under different hydrological control modes. Taking the freely connected sub-lake Bang Lake and the locally controlled sub-lake Dahuchi Lake of Poyang Lake as cases, and based on the Google Earth Engine (GEE) remote sensing cloud computing platform, we used the pixel binary model to estimate aquatic vegetation coverage from 2000 to 2019, analyzed the temporal and spatial differentiation characteristics, and simulated the variation trend with the combined Sen+M-K method. We analyzed the water level change characteristics during the study period and, by setting up water level fluctuation parameters, explored the relationship between the hydrological parameters and the aquatic vegetation coverage area of sub-lakes with different hydrological connectivity. The results showed that the aquatic vegetation coverage of Bang Lake was more susceptible to water level changes, while Dahuchi Lake was more stable. The aquatic vegetation was patchily and sporadically distributed in the years with low vegetation coverage. In the years with high vegetation coverage, it was distributed in a ring-like pattern, spreading from the center of the lake to the shore. The aquatic vegetation coverage of Bang Lake was more strongly influenced by the water level fluctuation rate, while that of Dahuchi Lake was more strongly influenced by the flooding duration at the 17 m characteristic water level. The flooding duration at the 19 m characteristic water level had a strong negative correlation with the aquatic vegetation coverage of Bang Lake and Dahuchi Lake. The trend of aquatic vegetation in Bang Lake was dominated by stabilization and slight improvement, while that in Dahuchi Lake was dominated by stabilization and significant degradation. Our results could help to further understand the dynamics of hydrological ecosystems with different hydrological connectivity and provide a reference for lake management and conservation.}, } @article {pmid35224632, year = {2022}, author = {Bradshaw, RL and Kawamoto, K and Kaphingst, KA and Kohlmann, WK and Hess, R and Flynn, MC and Nanjo, CJ and Warner, PB and Shi, J and Morgan, K and Kimball, K and Ranade-Kharkar, P and Ginsburg, O and Goodman, M and Chambers, R and Mann, D and Narus, SP and Gonzalez, J and Loomis, S and Chan, P and Monahan, R and Borsato, EP and Shields, DE and Martin, DK and Kessler, CM and Del Fiol, G}, title = {GARDE: a standards-based clinical decision support platform for identifying population health management cohorts.}, journal = {Journal of the American Medical Informatics Association : JAMIA}, volume = {29}, number = {5}, pages = {928-936}, pmid = {35224632}, issn = {1527-974X}, support = {R18 DK123372/DK/NIDDK NIH HHS/United States ; U01 CA232826/CA/NCI NIH HHS/United States ; U24 CA204800/CA/NCI NIH HHS/United States ; }, mesh = {*Decision Support Systems, Clinical ; Delivery of Health Care ; Electronic Health Records ; Humans ; Information Storage and Retrieval ; *Population Health Management ; }, abstract = {UNLABELLED: Population health management (PHM) is an important approach to promote wellness and deliver health care to targeted individuals who meet criteria for preventive measures or treatment. A critical component for any PHM program is a data analytics platform that can target those eligible individuals.

OBJECTIVE: The aim of this study was to design and implement a scalable standards-based clinical decision support (CDS) approach to identify patient cohorts for PHM and maximize opportunities for multi-site dissemination.

MATERIALS AND METHODS: An architecture was established to support bidirectional data exchanges between heterogeneous electronic health record (EHR) data sources, PHM systems, and CDS components. HL7 Fast Healthcare Interoperability Resources and CDS Hooks were used to facilitate interoperability and dissemination. The approach was validated by deploying the platform at multiple sites to identify patients who meet the criteria for genetic evaluation of familial cancer.

RESULTS: The Genetic Cancer Risk Detector (GARDE) platform was created and comprises four components: (1) an open-source CDS Hooks server for computing patient eligibility for PHM cohorts, (2) an open-source Population Coordinator that processes GARDE requests and communicates results to a PHM system, (3) an EHR Patient Data Repository, and (4) EHR PHM Tools to manage patients and perform outreach functions. Site-specific deployments were performed on onsite virtual machines and cloud-based Amazon Web Services.
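For context, a CDS Hooks server advertises its services through a standard discovery endpoint; the sketch below illustrates that pattern generically. The service id and fields are hypothetical and do not reproduce GARDE's actual catalog or implementation.

```python
# Generic illustration of the CDS Hooks discovery pattern a server like GARDE's
# builds on; the service id and descriptions below are hypothetical.
from flask import Flask, jsonify

app = Flask(__name__)

@app.get("/cds-services")
def discovery():
    # CDS Hooks clients call this endpoint to learn which services are offered.
    return jsonify({"services": [{
        "hook": "patient-view",
        "id": "familial-cancer-eligibility",   # hypothetical service id
        "title": "Genetic evaluation eligibility",
        "description": "Flags patients meeting familial cancer criteria.",
    }]})

if __name__ == "__main__":
    app.run(port=8080)
```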

DISCUSSION: GARDE's component architecture establishes generalizable standards-based methods for computing PHM cohorts. Replicating deployments using one of the established deployment methods requires minimal local customization. Most of the deployment effort was related to obtaining site-specific information technology governance approvals.}, } @article {pmid35218029, year = {2022}, author = {Fan, ZG and Tian, NL and He, SH and Ma, GS}, title = {Maintained P2Y12 inhibitor monotherapy after shorter-duration of dual antiplatelet therapy in patients undergoing coronary drug-eluting stents implantation: An updated meta-analysis of randomized trials.}, journal = {Journal of clinical pharmacy and therapeutics}, volume = {47}, number = {7}, pages = {860-869}, doi = {10.1111/jcpt.13626}, pmid = {35218029}, issn = {1365-2710}, support = {ZDXKA2016023//Jiangsu Provincial Key Medical Discipline/ ; }, mesh = {Drug Therapy, Combination ; *Drug-Eluting Stents/adverse effects ; Humans ; *Myocardial Infarction/drug therapy ; *Percutaneous Coronary Intervention/methods ; Platelet Aggregation Inhibitors/adverse effects ; Randomized Controlled Trials as Topic ; *Stroke/etiology/prevention & control ; *Thrombosis/drug therapy ; Treatment Outcome ; }, abstract = {WHAT IS KNOWN AND OBJECTIVE: It is well known that a high in-stent thrombotic risk due to the superimposition of a platelet-rich thrombus is considered the main origin of major adverse cardiac events after stent implantation. The clinical management of antiplatelet therapy strategy after percutaneous coronary intervention (PCI) remains controversial. This study sought to explore the efficacy and safety of maintained P2Y12 inhibitor monotherapy after shorter-duration dual antiplatelet therapy (DAPT) in these patients.

METHODS: Medline, Google Scholar, Web of Science, and the Cochrane Controlled Trials Registry were searched online to retrieve eligible citations. A composite of all-cause death, myocardial infarction (MI) and stroke was defined as major adverse cardio- and cerebro-vascular events (MACCE) and analysed as the primary efficacy endpoint. The risk of bleeding events was chosen as the safety endpoint.

RESULTS: Five randomized clinical trials (RCTs) with 32,143 patients were finally analysed. Maintained P2Y12 inhibitor monotherapy after shorter-duration DAPT could reduce not only the incidence of MACCE [odds ratio (OR): 0.89, 95% confidence interval (CI): 0.79-0.99, p = 0.037] but also the bleeding risk (OR 0.61, 95% CI: 0.44-0.85, p = 0.003). No higher incidence of ischaemic events, including MI, stroke or definite stent thrombosis (ST), was observed with this new antiplatelet therapy option.
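Summary estimates of this form are typically obtained by inverse-variance pooling of per-trial log odds ratios; the sketch below shows the fixed-effect version with made-up 2x2 counts, not the trial data analysed in this meta-analysis.

```python
# Fixed-effect inverse-variance pooling of log odds ratios, the usual way a
# summary like "OR 0.89 (95% CI 0.79-0.99)" is computed. Counts are illustrative.
import math

trials = [  # (events_tx, n_tx, events_ctl, n_ctl) per trial -- made up
    (120, 3000, 140, 3000),
    (80, 2500, 95, 2500),
    (60, 2000, 64, 2000),
]

num = den = 0.0
for a, n1, c, n2 in trials:
    b, d = n1 - a, n2 - c
    log_or = math.log((a * d) / (b * c))
    var = 1 / a + 1 / b + 1 / c + 1 / d      # Woolf variance of log(OR)
    w = 1 / var                              # inverse-variance weight
    num += w * log_or
    den += w

pooled, se = num / den, math.sqrt(1 / den)
lo, hi = math.exp(pooled - 1.96 * se), math.exp(pooled + 1.96 * se)
print(f"pooled OR {math.exp(pooled):.2f} (95% CI {lo:.2f}-{hi:.2f})")
```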

CONCLUSIONS: Maintained P2Y12 inhibitor monotherapy after shorter-duration DAPT appears to be a preferable antiplatelet therapy option in patients undergoing coronary drug-eluting stent (DES) placement. Larger and more powerful randomized trials with precise sub-analyses are still necessary to further confirm these benefits.}, } @article {pmid35214574, year = {2022}, author = {Zubair, AA and Razak, SA and Ngadi, MA and Al-Dhaqm, A and Yafooz, WMS and Emara, AM and Saad, A and Al-Aqrabi, H}, title = {A Cloud Computing-Based Modified Symbiotic Organisms Search Algorithm (AI) for Optimal Task Scheduling.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {4}, pages = {}, pmid = {35214574}, issn = {1424-8220}, support = {RSP-2021/260//King Saud University/ ; }, mesh = {Algorithms ; Artificial Intelligence ; *Cloud Computing ; Ecosystem ; *Symbiosis ; }, abstract = {The search algorithm based on symbiotic organisms' interactions is a relatively recent bio-inspired algorithm of the swarm intelligence field for solving numerical optimization problems. It is meant to optimize applications based on the simulation of the symbiotic relationship among the distinct species in the ecosystem. The task scheduling problem is NP-complete, which makes it hard to obtain a correct solution, especially for large-scale tasks. This paper proposes a modified symbiotic organisms search-based scheduling algorithm for the efficient mapping of heterogeneous tasks to cloud resources of different capacities. The significant contribution of this technique is the simplified representation of the algorithm's mutualism process, which uses equity as a measure of relationship characteristics or efficiency of species in the current ecosystem to move to the next generation. These relational characteristics are achieved by replacing the original mutual vector, which uses an arithmetic mean to measure the mutual characteristics, with a geometric mean that enhances the survival advantage of two distinct species. The modified symbiotic organisms search algorithm (G_SOS) aims to minimize the task execution time (makespan), cost, response time, and degree of imbalance, and to improve the convergence speed for an optimal solution in an IaaS cloud. The performance of the proposed technique was evaluated using a CloudSim toolkit simulator, and the improvement of the proposed G_SOS over classical SOS and PSO-SA in terms of makespan minimization ranges from 0.61% to 20.08% and from 1.92% to 25.68%, respectively, over large-scale tasks spanning 100 to 1000 Million Instructions (MI). The solutions are found to be better than the existing standard (SOS) technique and PSO.}, } @article {pmid35214506, year = {2022}, author = {Chen, L and Lu, Y and He, Z and Chen, Y}, title = {Online Trajectory Estimation Based on a Network-Wide Cellular Fingerprint Map.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {4}, pages = {}, pmid = {35214506}, issn = {1424-8220}, mesh = {*Cloud Computing ; }, abstract = {Cellular signaling data are widely available in mobile communications and contain abundant movement-sensing information on individual travelers. Using cellular signaling data to estimate the trajectories of mobile users can benefit many location-based applications, including infectious disease tracing and screening, network flow sensing, traffic scheduling, etc. However, conventional methods rely too much on heuristic hypotheses or hardware-dependent network fingerprinting approaches.
To address the above issues, NF-Track (Network-wide Fingerprinting based Tracking) is proposed to realize accurate online map-matching of cellular location sequences. In particular, neither prior assumptions, such as arterial preference and less-turn preference, nor extra hardware-relevant parameters, such as RSS and SNR, are required for the proposed framework. Therefore, it has a strong generalization ability and can be flexibly deployed in the cloud computing environment of telecom operators. In this architecture, a novel segment-granularity fingerprint map is put forward to provide sufficient prior knowledge. Then, a real-time trajectory estimation process is developed for precise positioning and tracking. In our experiments implemented on the urban road network, NF-Track can achieve a recall rate of 91.68% and a precision rate of 90.35% in sophisticated traffic scenes, which are superior to the state-of-the-art model-based unsupervised learning approaches.}, } @article {pmid35214456, year = {2022}, author = {Yin, Z and Xu, F and Li, Y and Fan, C and Zhang, F and Han, G and Bi, Y}, title = {A Multi-Objective Task Scheduling Strategy for Intelligent Production Line Based on Cloud-Fog Computing.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {4}, pages = {}, pmid = {35214456}, issn = {1424-8220}, support = {2017YFE0125300//National Key R&D Program of China/ ; }, abstract = {With the widespread use of industrial Internet technology in intelligent production lines, the number of task requests generated by smart terminals is growing exponentially. Achieving rapid response to these massive tasks becomes crucial. In this paper, we focus on the multi-objective task scheduling problem of intelligent production lines and propose a task scheduling strategy based on task priority. First, we set up a cloud-fog computing architecture for intelligent production lines and build the multi-objective function for task scheduling, which minimizes the service delay and energy consumption of the tasks. In addition, the improved hybrid monarch butterfly optimization and improved ant colony optimization algorithm (HMA) is used to search for the optimal task scheduling scheme. Finally, HMA is evaluated by rigorous simulation experiments, showing that it outperforms other algorithms in terms of task completion rate. When the number of nodes exceeds 10, the completion rate of all tasks is greater than 90%, which meets the real-time requirements of the corresponding tasks in intelligent production lines. In addition, the algorithm outperforms other algorithms in terms of maximum completion rate and power consumption.}, } @article {pmid35214384, year = {2022}, author = {Shaukat, M and Alasmary, W and Alanazi, E and Shuja, J and Madani, SA and Hsu, CH}, title = {Balanced Energy-Aware and Fault-Tolerant Data Center Scheduling.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {4}, pages = {}, pmid = {35214384}, issn = {1424-8220}, abstract = {Fault tolerance, performance, and throughput have been major areas of research and development since the evolution of large-scale networks. Internet-based applications are rapidly growing, including large-scale computations, search engines, high-definition video streaming, e-commerce, and video on demand. In recent years, energy efficiency and fault tolerance have gained significant importance in data center networks, and various studies have directed attention towards green computing.
Data centers consume a huge amount of energy, and various architectures and techniques have been proposed to improve their energy efficiency. However, there is a tradeoff between energy efficiency and fault tolerance. The objective of this study is to highlight a better tradeoff between the two extremes: (a) high energy efficiency and (b) high availability ensured through fault tolerance and redundancy. The main objective of the proposed Energy-Aware Fault-Tolerant (EAFT) approach is to keep one level of redundancy for fault tolerance while scheduling resources for energy efficiency. The resultant energy-efficient data center network provides availability as well as fault tolerance at reduced operating cost. The main contributions of this article are: (a) we propose an Energy-Aware Fault-Tolerant (EAFT) data center network scheduler; (b) we compare EAFT with energy-efficient resource scheduling techniques to provide analysis of parameters such as workload distribution, average tasks per server, and energy consumption; and (c) we highlight the effects of energy efficiency techniques on the network performance of the data center.}, } @article {pmid35214297, year = {2022}, author = {Kareem, SS and Mostafa, RR and Hashim, FA and El-Bakry, HM}, title = {An Effective Feature Selection Model Using Hybrid Metaheuristic Algorithms for IoT Intrusion Detection.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {4}, pages = {}, pmid = {35214297}, issn = {1424-8220}, mesh = {Algorithms ; Artificial Intelligence ; Cloud Computing ; *Internet of Things ; Machine Learning ; }, abstract = {The increasing use of Internet of Things (IoT) applications in various aspects of our lives has created a huge amount of data. IoT applications often require the presence of many technologies, such as cloud computing and fog computing, which have led to serious security challenges. As a result of the use of these technologies, cyberattacks are also on the rise because current security methods are ineffective. Several artificial intelligence (AI)-based security solutions have been presented in recent years, including intrusion detection systems (IDS). Feature selection (FS) approaches are required for the development of intelligent analytic tools, which need data pretreatment and machine-learning algorithm performance enhancement. By reducing the number of selected features, FS aims to improve classification accuracy. This article presents a new FS method that boosts the performance of the Gorilla Troops Optimizer (GTO) with the Bird Swarm Algorithm (BSA). BSA is used to boost the exploitation performance of GTO in the newly developed GTO-BSA because it has a strong ability to find feasible regions with optimal solutions. As a result, the quality of the final output will increase, improving convergence. GTO-BSA's performance was evaluated using a variety of performance measures on four IoT-IDS datasets: NSL-KDD, CICIDS-2017, UNSW-NB15 and BoT-IoT. The results were compared to those of the original GTO, BSA, and several state-of-the-art techniques in the literature.
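The GTO-BSA update rules cannot be reconstructed from an abstract, but the wrapper-style objective that such binary feature-selection metaheuristics typically optimize can be sketched: classification accuracy minus a small penalty on the number of selected features. The random-search stand-in below only illustrates the fitness function, not the hybrid metaheuristic itself.

```python
# Wrapper-style feature-selection fitness, as typically optimized by binary
# metaheuristics like GTO-BSA; the random search here is only a stand-in.
import numpy as np
from sklearn.datasets import make_classification
from sklearn.model_selection import cross_val_score
from sklearn.neighbors import KNeighborsClassifier

X, y = make_classification(n_samples=600, n_features=20, random_state=7)

def fitness(mask, alpha=0.01):
    """Reward cross-validated accuracy, penalize the number of kept features."""
    if not mask.any():
        return 0.0
    acc = cross_val_score(KNeighborsClassifier(), X[:, mask], y, cv=3).mean()
    return acc - alpha * mask.sum() / mask.size

rng = np.random.default_rng(7)
best = max((rng.random(20) < 0.5 for _ in range(30)), key=fitness)
print(best.nonzero()[0], round(fitness(best), 4))
```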
According to the findings of the experiments, GTO-BSA had a better convergence rate and higher-quality solutions.}, } @article {pmid35214282, year = {2022}, author = {Arikumar, KS and Prathiba, SB and Alazab, M and Gadekallu, TR and Pandya, S and Khan, JM and Moorthy, RS}, title = {FL-PMI: Federated Learning-Based Person Movement Identification through Wearable Devices in Smart Healthcare Systems.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {4}, pages = {}, pmid = {35214282}, issn = {1424-8220}, mesh = {Artificial Intelligence ; Cloud Computing ; Delivery of Health Care ; Humans ; *Internet of Things ; *Wearable Electronic Devices ; }, abstract = {Recent technological developments, such as the Internet of Things (IoT), artificial intelligence, edge, and cloud computing, have paved the way for transforming traditional healthcare systems into smart healthcare (SHC) systems. SHC enhances healthcare management with increased efficiency, convenience, and personalization, using wearable devices and connectivity to access information with rapid responses. Wearable devices are equipped with multiple sensors to identify a person's movements. Conventionally, models are trained directly on cloud servers using the unlabeled data acquired from these sensors, which requires vast memory and high computational costs. To overcome this limitation in SHC, we propose federated learning-based person movement identification (FL-PMI). The deep reinforcement learning (DRL) framework is leveraged in FL-PMI for auto-labeling the unlabeled data. The data are then trained using federated learning (FL), in which the edge servers pass only the model parameters to the cloud, rather than vast amounts of sensor data. Finally, the bidirectional long short-term memory (BiLSTM) in FL-PMI classifies the data for various processes associated with the SHC. The simulation results proved the efficiency of FL-PMI, with 99.67% accuracy scores, minimized memory usage and computational costs, and a 36.73% reduction in transmitted data.}, } @article {pmid35214212, year = {2022}, author = {Alkhateeb, A and Catal, C and Kar, G and Mishra, A}, title = {Hybrid Blockchain Platforms for the Internet of Things (IoT): A Systematic Literature Review.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {4}, pages = {}, pmid = {35214212}, issn = {1424-8220}, mesh = {*Blockchain ; Cloud Computing ; Delivery of Health Care ; Information Dissemination ; *Internet of Things ; }, abstract = {In recent years, research into blockchain technology and the Internet of Things (IoT) has grown rapidly due to an increase in media coverage. Many different blockchain applications and platforms have been developed for different purposes, such as food safety monitoring, cryptocurrency exchange, and secure medical data sharing. However, blockchain platforms cannot store all the generated data. Therefore, they are supported with data warehouses; the combination is called a hybrid blockchain platform. While several systems have been developed based on this idea, a current state-of-the-art systematic overview on the use of hybrid blockchain platforms is lacking. Therefore, we carried out a systematic literature review (SLR) to investigate the motivations for adopting them, the domains in which they were used, the adopted technologies that made this integration effective, and, finally, the challenges and possible solutions.
This study shows that security, transparency, and efficiency are the top three motivations for adopting these platforms. The energy, agriculture, health, construction, manufacturing, and supply chain domains are the top domains. The most adopted technologies are cloud computing, fog computing, telecommunications, and edge computing. While there are several benefits of using hybrid blockchains, there are also several challenges reported in this study.}, } @article {pmid35207676, year = {2022}, author = {Lee, J and Jeong, J and Jung, S and Moon, J and Rho, S}, title = {Verification of De-Identification Techniques for Personal Information Using Tree-Based Methods with Shapley Values.}, journal = {Journal of personalized medicine}, volume = {12}, number = {2}, pages = {}, pmid = {35207676}, issn = {2075-4426}, support = {P0008703//Korea Institute for Advancement of Technology/ ; 2021-2018-0-01799//Institute for Information and Communications Technology Promotion/ ; }, abstract = {With the development of big data and cloud computing technologies, the importance of pseudonym information has grown. However, the tools for verifying whether the de-identification methodology is correctly applied to ensure data confidentiality and usability are insufficient. This paper proposes a verification of de-identification techniques for personal healthcare information by considering data confidentiality and usability. Data are generated and preprocessed by considering the actual statistical data, personal information datasets, and de-identification datasets based on medical data to represent the de-identification technique as a numeric dataset. Five tree-based regression models (i.e., decision tree, random forest, gradient boosting machine, extreme gradient boosting, and light gradient boosting machine) are constructed using the de-identification dataset to effectively discover nonlinear relationships between dependent and independent variables in numerical datasets. Then, the most effective model is selected from personal information data in which pseudonym processing is essential for data utilization. 
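The model-comparison step described in this entry can be sketched as follows: fit several tree-based regressors on a numeric dataset and keep the one with the best cross-validated score. The synthetic data and the subset of models shown are assumptions for illustration, not the paper's dataset or full model list.

```python
# Sketch of comparing tree-based regressors and keeping the best by CV score;
# synthetic stand-in data, not the paper's de-identification dataset.
from sklearn.datasets import make_regression
from sklearn.ensemble import GradientBoostingRegressor, RandomForestRegressor
from sklearn.model_selection import cross_val_score
from sklearn.tree import DecisionTreeRegressor

X, y = make_regression(n_samples=1000, n_features=12, noise=0.3, random_state=3)

models = {
    "decision_tree": DecisionTreeRegressor(random_state=3),
    "random_forest": RandomForestRegressor(random_state=3),
    "gbm": GradientBoostingRegressor(random_state=3),
    # XGBoost / LightGBM would slot in here if installed
}
scores = {name: cross_val_score(m, X, y, cv=5, scoring="r2").mean() for name, m in models.items()}
best = max(scores, key=scores.get)
print(scores, "->", best)
```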
The Shapley additive explanation, an explainable artificial intelligence technique, is applied to the most effective model to establish pseudonym-processing policies and to present a machine-learning process that selects an appropriate de-identification methodology.}, } @article {pmid35205036, year = {2022}, author = {Li, Z and Gurgel, H and Xu, L and Yang, L and Dong, J}, title = {Improving Dengue Forecasts by Using Geospatial Big Data Analysis in Google Earth Engine and the Historical Dengue Information-Aided Long Short Term Memory Modeling.}, journal = {Biology}, volume = {11}, number = {2}, pages = {}, pmid = {35205036}, issn = {2079-7737}, support = {41801336//National Natural Science Foundation of China/ ; 42061134019//National Natural Science Foundation of China/ ; QYZDB-SSW-DQC005)//Key Research Program of Frontier Sciences of the Chinese Academy of Sciences/ ; E0V00110YZ//Institute of Geographic Sciences and Natural Resources Research (IGNSRR), Chinese Academy of Sciences (CAS)/ ; }, abstract = {Timely and accurate forecasts of dengue cases are of great importance for guiding disease prevention strategies, but still face challenges from (1) time-effectiveness due to time-consuming satellite data downloading and processing, (2) weak spatial representation capability due to data dependence on administrative unit-based statistics or weather station-based observations, and (3) stagnant accuracy without the application of historical case information. Geospatial big data, cloud computing platforms (e.g., Google Earth Engine, GEE), and emerging deep learning algorithms (e.g., long short term memory, LSTM) provide new opportunities for advancing these efforts. Here, we focused on the dengue epidemics in the urban agglomeration of the Federal District of Brazil (FDB) during 2007-2019. A new framework was proposed using geospatial big data analysis in the Google Earth Engine (GEE) platform and long short term memory (LSTM) modeling for dengue case forecasts on an epidemiological-week basis. We first defined a buffer zone around an impervious area as the main area of dengue transmission, considering the impervious area as a human-dominated area, and used the maximum flight range of Aedes aegypti and Aedes albopictus as the buffer distance. These zones were used as units for further attribution analyses of dengue epidemics by aggregating the pixel values into the zones. The near-weekly composite of potential driving factors was generated in GEE for the epidemiological weeks during 2007-2019 from the relevant geospatial data with daily or sub-daily temporal resolution. A multi-step-ahead LSTM model was used, and the time-differenced, natural log-transformed dengue cases were used as outcomes. Two modeling scenarios (with and without historical dengue cases) were set up to examine the contribution of historical information to dengue forecasts. The results indicate that performance was better when historical dengue cases were used, that the 5-weeks-ahead forecast performed best, and that the peak of a large outbreak in 2019 was accurately forecasted.
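A toy sketch of this forecasting setup: an LSTM trained on time-differenced, log-transformed weekly counts that predicts several weeks ahead. The window length, architecture, and synthetic series are assumptions for illustration, not the paper's configuration or data.

```python
# Multi-step-ahead LSTM on time-differenced, log-transformed weekly counts.
# All sizes and the toy series are assumptions; only the setup is illustrated.
import numpy as np
from tensorflow import keras

rng = np.random.default_rng(11)
cases = rng.poisson(50, 400).astype(float) + 1           # stand-in weekly cases
series = np.diff(np.log(cases))                          # time-differenced log cases

lookback, horizon = 12, 5                                # e.g., 5-weeks-ahead forecast
n = len(series) - lookback - horizon
X = np.stack([series[i:i + lookback] for i in range(n)])
y = np.stack([series[i + lookback:i + lookback + horizon] for i in range(n)])

model = keras.Sequential([
    keras.layers.LSTM(32, input_shape=(lookback, 1)),
    keras.layers.Dense(horizon),                         # multi-step-ahead output
])
model.compile(optimizer="adam", loss="mse")
model.fit(X[..., None], y, epochs=5, verbose=0)
print(model.predict(X[-1:][..., None], verbose=0))
```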
The framework proposed in this study demonstrates the potential of the GEE platform, the LSTM algorithm, and historical information for dengue risk forecasting, and it can readily be applied to other regions, or globally, for timely and practical dengue forecasts.}, } @article {pmid35199087, year = {2022}, author = {Schatz, MC and Philippakis, AA and Afgan, E and Banks, E and Carey, VJ and Carroll, RJ and Culotti, A and Ellrott, K and Goecks, J and Grossman, RL and Hall, IM and Hansen, KD and Lawson, J and Leek, JT and Luria, AO and Mosher, S and Morgan, M and Nekrutenko, A and O'Connor, BD and Osborn, K and Paten, B and Patterson, C and Tan, FJ and Taylor, CO and Vessio, J and Waldron, L and Wang, T and Wuichet, K}, title = {Inverting the model of genomics data sharing with the NHGRI Genomic Data Science Analysis, Visualization, and Informatics Lab-space.}, journal = {Cell genomics}, volume = {2}, number = {1}, pages = {}, pmid = {35199087}, issn = {2666-979X}, support = {U24 HG006620/HG/NHGRI NIH HHS/United States ; U24 HG010262/HG/NHGRI NIH HHS/United States ; U24 HG010263/HG/NHGRI NIH HHS/United States ; }, abstract = {The NHGRI Genomic Data Science Analysis, Visualization, and Informatics Lab-space (AnVIL; https://anvilproject.org) was developed to address a widespread community need for a unified computing environment for genomics data storage, management, and analysis. In this perspective, we present AnVIL, describe its ecosystem and interoperability with other platforms, and highlight how this platform and associated initiatives contribute to improved genomic data sharing efforts. The AnVIL is a federated cloud platform designed to manage and store genomics and related data, enable population-scale analysis, and facilitate collaboration through the sharing of data, code, and analysis results. By inverting the traditional model of data sharing, the AnVIL eliminates the need for data movement while also adding security measures for active threat detection and monitoring and provides scalable, shared computing resources for any researcher. We describe the core data management and analysis components of the AnVIL, which currently consist of Terra, Gen3, Galaxy, RStudio/Bioconductor, Dockstore, and Jupyter, and describe several flagship genomics datasets available within the AnVIL. We continue to extend and innovate the AnVIL ecosystem by implementing new capabilities, including mechanisms for interoperability and responsible data sharing, while streamlining access management. The AnVIL opens many new opportunities for analysis, collaboration, and data sharing that are needed to drive research and to make discoveries through the joint analysis of hundreds of thousands to millions of genomes along with associated clinical and molecular data types.}, } @article {pmid35194579, year = {2022}, author = {Sengupta, K and Srivastava, PR}, title = {HRNET: AI-on-Edge for Mask Detection and Social Distancing Calculation.}, journal = {SN computer science}, volume = {3}, number = {2}, pages = {157}, pmid = {35194579}, issn = {2661-8907}, abstract = {The purpose of this paper is to provide an innovative emerging-technology framework for communities to combat epidemic situations. The paper proposes a unique outbreak response system framework based on artificial intelligence and edge computing for citizen-centric services to help track and trace people eluding safety policies such as mask wearing and social distancing in public or workplace settings.
The framework further provides implementation guidelines for industrial setups as well as for governance and contact-tracing tasks. Its adoption can thus support smart-city planning and development focused on citizen health systems, contributing to improved quality of life. The conceptual framework is validated through quantitative analysis of secondary data collected from researchers' public websites, GitHub repositories, and renowned journals, and benchmarks of the experimental results were conducted in the Microsoft Azure cloud environment. Selected AI models were benchmarked and assessed on performance and accuracy in an edge-computing environment for a large-scale societal setup. Overall, the YOLO model performs best on the object-detection task and is fast enough for mask detection, while HRNetV2 performs best on the semantic-segmentation problem used to solve the social-distancing task in the AI-Edge inferencing setup. The paper proposes a new Edge-AI algorithm for building technology-oriented solutions that detect masks and social distance in human movement. The paper enriches technological advancement in artificial intelligence and edge computing applied to problems in society and healthcare systems. The framework further equips government agencies and system providers to design and construct technology-oriented models in community settings that increase quality of life by bringing emerging technologies into smart urban environments.}, } @article {pmid35193188, year = {2022}, author = {Hahn, M and Arthanayaka, T and Beiersdorfer, P and Brown, GV and Savin, DW}, title = {Ion energy distribution in an electron beam ion trap inferred from simulations of the trapped ion cloud.}, journal = {Physical review. E}, volume = {105}, number = {1-2}, pages = {015204}, doi = {10.1103/PhysRevE.105.015204}, pmid = {35193188}, issn = {2470-0053}, abstract = {We have inferred the energy distribution of trapped ions in an electron beam ion trap (EBIT) from simulations of the spatial distribution of Fe^{13+} ions and a comparison with measured visible light images of the ion cloud. We simulated the cloud of Fe^{13+} ions by computing ion trajectories in the EBIT for different ion energy distributions used to initialize the trajectories. We then performed a least-squares fit to infer the ion energy distribution that best reproduced the measured ion cloud. These best-fit distributions were typically non-Maxwellian. For electron beam energies of 395-475 eV and electron beam currents of 1-9 mA, we find that the average ion energy is in the range of 10-300 eV. We also find that the average ion energy increases with increasing beam current approximately as 〈E〉 ≈ 25 I_{e} eV, where I_{e} is the electron beam current in mA. We have also compared our results to Maxwell-Boltzmann-distribution ion clouds.
We find that our best-fit non-thermal distributions have an average energy 〈E〉 less than half the temperature T of the best-fit Maxwell-Boltzmann distributions, with (〈E〉/q)/T = 0.41 ± 0.05.}, } @article {pmid35186242, year = {2022}, author = {Yan, M and Yan, M}, title = {Monitoring and Early Warning Analysis of the Epidemic Situation of Escherichia coli Based on Big Data Technology and Cloud Computing.}, journal = {Journal of healthcare engineering}, volume = {2022}, number = {}, pages = {8739447}, pmid = {35186242}, issn = {2040-2309}, mesh = {*Big Data ; *Cloud Computing ; Escherichia coli ; Humans ; Technology ; }, abstract = {The purpose of this study is to analyze the molecular epidemiological characteristics and resistance mechanisms of Escherichia coli. The study established a big data cloud computing prediction model for the epidemic mechanism of the pathogen. It establishes early-warning and control parameters and a mathematical model of Escherichia coli infectious disease and monitors the molecular sequence of the pathogen based on discrete indicators. A nonlinear mathematical model equation was used to establish the epidemic trend model of Escherichia coli. The study shows that the use of the model can keep the relative error at about 5%. The experiment proves the effectiveness of the combined model.}, } @article {pmid35186061, year = {2022}, author = {Yin, H}, title = {Public Security Video Image Detection System Construction Platform in Cloud Computing Environment.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {4113803}, pmid = {35186061}, issn = {1687-5273}, mesh = {Algorithms ; *Cloud Computing ; *Computer Security ; Data Collection ; }, abstract = {The public security image detection system is an important way to assist the police in the investigation. In today's cloud computing environment, the processing power of cloud computing is gradually improving. In order to explore its application in the investigation system, this paper constructs a public security video image investigation system based on a cloud computing environment. This paper uses cloud computing technology to improve the processing capacity of the system. It then constructs a basic model by combining the storage capabilities of the backend server implementation technology, the working principle of Hadoop, and CP-ABE encryption, decryption, and reconstruction. This paper also designs public security video surveillance system vehicle detection and test experiments, cloud storage encryption algorithm experiments, and computational storage requirements analysis experiments. It optimizes the system based on the results of the experiment and finally compares it with the traditional investigation system. The experimental results show that the public security video image detection system based on cloud computing can improve the accuracy by 5%-25% compared with the traditional detection system.
It can also increase efficiency by 2%-17% compared with the traditional detection system.}, } @article {pmid35180272, year = {2022}, author = {Nai, R}, title = {The design of smart classroom for modern college English teaching under Internet of Things.}, journal = {PloS one}, volume = {17}, number = {2}, pages = {e0264176}, doi = {10.1371/journal.pone.0264176}, pmid = {35180272}, issn = {1932-6203}, mesh = {Academic Performance ; Computer-Assisted Instruction/*methods ; Humans ; *Internet of Things ; *Language ; Students/psychology ; }, abstract = {This study aims to improve the efficiency of modern college English teaching. With interactive teaching as the core element, smart classrooms as technical support, and informationization, automation, and interaction as the main body, a smart system for college English teaching is established based on cloud computing and Internet of Things (IoT). The system is built using the B/S architecture and verified by specific example data, to prove the effectiveness of the proposed smart system for college English teaching based on the IoT. It is found that the smart platform for English teaching based on the IoT not only effectively improves the stability of the system, but also enhances the personal experience of students. The coordinated operation of the various modules reduces the response time of the system. When the number of users reaches 500, the average response time of the system is 3.65 seconds, and the memory and occupancy rate of the system are reduced. Students taught in smart classrooms show greater improvement in test results across various aspects of English without teacher intervention. The proposed model can significantly improve the performance of poor students and reduce the gap in learning performance in the class, which provides reliable research ideas for smart teaching in modern colleges and universities.}, } @article {pmid35177634, year = {2022}, author = {Li, H and Li, M}, title = {Patent data access control and protection using blockchain technology.}, journal = {Scientific reports}, volume = {12}, number = {1}, pages = {2772}, pmid = {35177634}, issn = {2045-2322}, abstract = {The purposes are to exploit patent data in depth, control the data-access process effectively, and protect patent information and content. The traditional patent review systems are analyzed. Building on present patent data security and privacy-protection technologies and algorithms, the patent information data are stored on different block nodes after data fragmentation using blockchain technology. Then the data are shared using the data encryption algorithm. In this way, data access control can be restricted to particular users. Finally, a patent data protection scheme based on privacy protection is proposed. The security of the scheme and the model performance are verified through simulation experiments. The time required to encrypt 10 MB files with 64-bit and 128-bit data is 35 ms and 105 ms, respectively. The proposed re-encryption algorithm only needs 1 s to decrypt 64 KB data, and only 1% of the data needs asymmetric encryption. This greatly reduces the computational overhead of encryption. Results demonstrate that the system can effectively control the access methods of users, efficiently protect the personal privacy and patent content of patent applicants, and reduce the patent office cloud computing overhead using the local resources of branches.
The distributed storage methods can reduce the cloud system interaction of the patent office, thereby greatly improving the speed of encryption and ensuring data security. Compared with state-of-the-art methods, the proposed patent data access and protection system based on blockchain technology has greater advantages in data security and model performance. The research results can provide a research foundation and practical value for the protection and review systems of patent data.}, } @article {pmid35175690, year = {2022}, author = {Mangold, KE and Zhou, Z and Schoening, M and Moreno, JD and Silva, JR}, title = {Creating Ion Channel Kinetic Models Using Cloud Computing.}, journal = {Current protocols}, volume = {2}, number = {2}, pages = {e374}, pmid = {35175690}, issn = {2691-1299}, support = {R01 HL136553/HL/NHLBI NIH HHS/United States ; /NH/NIH HHS/United States ; R01HL136553/HB/NHLBI NIH HHS/United States ; T32-HL134635/HB/NHLBI NIH HHS/United States ; T32 HL134635/HL/NHLBI NIH HHS/United States ; }, mesh = {*Cloud Computing ; Computer Simulation ; *Ion Channels/metabolism ; Kinetics ; Software ; }, abstract = {Computational modeling of ion channels provides key insight into experimental electrophysiology results and can be used to connect channel dynamics to emergent phenomena observed at the tissue and organ levels. However, creation of these models requires substantial mathematical and computational background. This tutorial seeks to lower the barrier to creating these models by providing an automated pipeline for creating Markov models of an ion channel kinetics dataset. We start by detailing how to encode sample voltage-clamp protocols and experimental data into the program and its implementation in a cloud computing environment. We guide the reader on how to build a containerized instance, push the machine image, and finally run the routine on cluster nodes. While providing open-source code has become more standard in computational studies, this tutorial provides unprecedented detail on the use of the program and the creation of channel models, starting from inputting the raw experimental data. © 2022 Wiley Periodicals LLC. Basic Protocol: Creation of ion channel kinetic models with a cloud computing environment Alternate Protocol: Instructions for use in a standard high-performance compute cluster.}, } @article {pmid35174762, year = {2022}, author = {Sheeba, A and Padmakala, S and Subasini, CA and Karuppiah, SP}, title = {MKELM: Mixed Kernel Extreme Learning Machine using BMDA optimization for web services based heart disease prediction in smart healthcare.}, journal = {Computer methods in biomechanics and biomedical engineering}, volume = {25}, number = {10}, pages = {1180-1194}, doi = {10.1080/10255842.2022.2034795}, pmid = {35174762}, issn = {1476-8259}, mesh = {Algorithms ; Amines ; Delivery of Health Care ; *Heart Diseases ; Humans ; *Machine Learning ; }, abstract = {In recent years, cardiovascular disease has become a prominent cause of death. Web services connect medical equipment and computers via the internet to exchange and combine data in novel ways. Accurate prediction of heart disease is important for protecting cardiac patients before a heart attack occurs. The main difficulty is the delay in identifying the disease at an early stage. This objective is achieved by using machine learning with rich healthcare information on heart disease.
In this paper, a smart healthcare method is proposed for heart disease prediction using a Biogeography-based optimization algorithm and a Mexican hat wavelet to enhance Dragonfly algorithm optimization with a mixed-kernel-based extreme learning machine (BMDA-MKELM). Here, data are gathered from two sources: sensor nodes and electronic medical records. An Android-based design is utilized to gather patient data, with a reliable cloud-based scheme for data storage. For further evaluation of heart disease prediction, data are gathered from cloud computing services. Finally, the BMDA-MKELM-based prediction scheme is capable of classifying cardiovascular diseases. In addition, the proposed prediction scheme is compared with other methods with respect to measures such as accuracy, precision, specificity, and sensitivity. The experimental results show that the proposed approach achieves better results for heart disease prediction than the compared methods.}, } @article {pmid35174270, year = {2022}, author = {Sang, Y and Cheng, J and Wang, B and Chen, M}, title = {A three-stage heuristic task scheduling for optimizing the service level agreement satisfaction in device-edge-cloud cooperative computing.}, journal = {PeerJ. Computer science}, volume = {8}, number = {}, pages = {e851}, pmid = {35174270}, issn = {2376-5992}, abstract = {Device-edge-cloud cooperative computing is increasingly popular as it can effectively address the problem of the resource scarcity of user devices. Improving resource efficiency through task scheduling is one of the most challenging issues in such computing environments. Existing works preferentially use the limited resources of devices and edge servers, which can leave abundant cloud resources underused. This article studies the task scheduling problem to optimize service level agreement satisfaction, in terms of the number of tasks whose hard deadlines are met, for device-edge-cloud cooperative computing. This article first formulates the problem as a binary nonlinear program, and then proposes a heuristic scheduling method with three stages to solve the problem in polynomial time. The first stage tries to fully exploit the abundant cloud resources by pre-scheduling user tasks in the resource priority order of clouds, edge servers, and local devices. In the second stage, the proposed heuristic method reschedules some tasks from edges to devices, to free shared edge resources for tasks that cannot be completed locally, and schedules those tasks to edge servers. At the last stage, our method reschedules as many tasks as possible from clouds to edges or devices, to reduce the resource cost.
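A toy rendition of this three-stage idea is sketched below in Python; the capacities, demands, and feasibility rule are simplified assumptions (the paper's actual formulation is a binary nonlinear program with hard deadlines, which this sketch does not model):

    # Stage 1 prefers cloud -> edge -> device; stage 2 frees edge capacity by
    # moving tasks to devices; stage 3 pulls tasks back from the cloud to cut cost.
    def three_stage(tasks, caps):
        """tasks: list of (task_id, demand); caps: dict tier -> capacity."""
        placement = {}
        for tid, demand in sorted(tasks, key=lambda t: t[1], reverse=True):
            for tier in ("cloud", "edge", "device"):        # stage 1
                if caps[tier] >= demand:
                    caps[tier] -= demand
                    placement[tid] = tier
                    break
        for tid, demand in tasks:                            # stage 2
            if placement.get(tid) == "edge" and caps["device"] >= demand:
                caps["edge"] += demand
                caps["device"] -= demand
                placement[tid] = "device"
        for tid, demand in tasks:                            # stage 3
            if placement.get(tid) == "cloud":
                for tier in ("edge", "device"):
                    if caps[tier] >= demand:
                        caps["cloud"] += demand
                        caps[tier] -= demand
                        placement[tid] = tier
                        break
        return placement

    print(three_stage([(0, 4), (1, 2), (2, 1)],
                      {"cloud": 5, "edge": 2, "device": 2}))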
Experimental results show that our method performs up to 59% better in service level agreement satisfaction, without decreasing resource efficiency, compared with eight classical and state-of-the-art methods.}, } @article {pmid35171378, year = {2022}, author = {Zhang, L and Hu, Q and Tang, Z}, title = {Assessing the contemporary status of Nebraska's eastern saline wetlands by using a machine learning algorithm on the Google Earth Engine cloud computing platform.}, journal = {Environmental monitoring and assessment}, volume = {194}, number = {3}, pages = {193}, pmid = {35171378}, issn = {1573-2959}, mesh = {Cloud Computing ; *Ecosystem ; Environmental Monitoring/methods ; Machine Learning ; Nebraska ; Search Engine ; Soil ; *Wetlands ; }, abstract = {Nebraska's eastern saline wetlands are globally unique and highly vulnerable inland salt marsh ecosystems. This research aims to evaluate the status of the saline wetlands in eastern Nebraska to discover the conditions of saline wetland hydrology, hydrophytes, and hydraulic soil. The research adopts machine learning and Google Earth Engine to classify Sentinel-2 imagery for water and vegetation classification and the National Agriculture Imagery Program imagery for salinity conditions. Six machine learning models are applied in water, soil, and vegetation detection in the study area. The optimal model (linear kernel SVM) generates an overall accuracy of 99.95% for water classification. For saline vegetation classification, the optimal model is the gradient tree boost with an overall accuracy of 94.07%. The overall accuracy values of saline soil classification using the optimal model (linear kernel SVM) varied among different years. The results of this study show the possibility of an observation approach for continuously monitoring Nebraska's eastern saline wetlands. The water classification results show that the saline wetlands in this area all have a similar temporal water cover pattern within each year. For saline vegetation, the peak season in this area is between June and July. The years 2019 (19.00%) and 2018 (17.69%) had higher saline vegetation cover rates than 2017 (10.54%). The saline soil classification shows that the saline soil area is highly variable in response to changes in the water and vegetation conditions. The research findings provide solid scientific evidence for conservation decision-making in these saline wetland areas.}, } @article {pmid35166338, year = {2022}, author = {Fahrner, M and Föll, MC and Grüning, BA and Bernt, M and Röst, H and Schilling, O}, title = {Democratizing data-independent acquisition proteomics analysis on public cloud infrastructures via the Galaxy framework.}, journal = {GigaScience}, volume = {11}, number = {}, pages = {}, pmid = {35166338}, issn = {2047-217X}, mesh = {*Computational Biology/methods ; Mass Spectrometry ; *Proteomics/methods ; Reproducibility of Results ; Software ; }, abstract = {BACKGROUND: Data-independent acquisition (DIA) has become an important approach in global, mass spectrometric proteomic studies because it provides in-depth insights into the molecular variety of biological systems. However, DIA data analysis remains challenging owing to the high complexity and large data and sample size, which require specialized software and vast computing infrastructures. Most available open-source DIA software necessitates basic programming skills and covers only a fraction of a complete DIA data analysis.
Consequently, DIA data analysis often requires multiple software tools that must be compatible with one another, severely limiting usability and reproducibility.

FINDINGS: To overcome this hurdle, we have integrated a suite of open-source DIA tools in the Galaxy framework for reproducible and version-controlled data processing. The DIA suite includes OpenSwath, PyProphet, diapysef, and swath2stats. We have compiled functional Galaxy pipelines for DIA processing, which provide a web-based graphical user interface to these pre-installed and pre-configured tools for their use on freely accessible, powerful computational resources of the Galaxy framework. This approach also enables seamless sharing of workflows with their full configuration, in addition to sharing raw data and results. We demonstrate the usability of an all-in-one DIA pipeline in Galaxy through the analysis of a spike-in case-study dataset. Additionally, extensive training material is provided to further increase access for the proteomics community.
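For readers who prefer scripting over the web interface, a Galaxy workflow of this kind can also be driven programmatically. The following Python sketch uses the BioBlend client library; the server URL, API key, workflow name, and dataset identifiers are placeholders, not values from the paper:

    from bioblend.galaxy import GalaxyInstance

    # Hypothetical server and credentials; any Galaxy instance could be used.
    gi = GalaxyInstance(url="https://usegalaxy.eu", key="YOUR_API_KEY")

    # Find an imported DIA workflow by name (the name is assumed here).
    workflow = next(w for w in gi.workflows.get_workflows()
                    if w["name"] == "DIA-OpenSwath")

    # Create a history and map workflow inputs to already-uploaded datasets.
    history = gi.histories.create_history(name="dia-run")
    inputs = {
        "0": {"src": "hda", "id": "RAW_DATASET_ID"},     # placeholder IDs
        "1": {"src": "hda", "id": "LIBRARY_DATASET_ID"},
    }
    invocation = gi.workflows.invoke_workflow(workflow["id"], inputs=inputs,
                                              history_id=history["id"])
    print("Invocation state:", invocation["state"])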

CONCLUSION: The integration of an open-source DIA analysis suite in the web-based and user-friendly Galaxy framework, in combination with extensive training material, empowers a broad community of researchers to perform reproducible and transparent DIA data analysis.}, } @article {pmid35165304, year = {2022}, author = {Touma, S and Antaki, F and Duval, R}, title = {Development of a code-free machine learning model for the classification of cataract surgery phases.}, journal = {Scientific reports}, volume = {12}, number = {1}, pages = {2398}, pmid = {35165304}, issn = {2045-2322}, mesh = {Cataract Extraction/methods/*standards ; Deep Learning ; Humans ; Lens, Crystalline/*surgery ; *Machine Learning ; Ophthalmology/*standards ; }, abstract = {This study assessed the performance of automated machine learning (AutoML) in classifying cataract surgery phases from surgical videos. Two ophthalmology trainees without coding experience designed a deep learning model in Google Cloud AutoML Video Classification for the classification of 10 different cataract surgery phases. We used two open-access publicly available datasets (total of 122 surgeries) for model training, validation and testing. External validation was performed on 10 surgeries drawn from another dataset. The AutoML model demonstrated excellent discriminating performance, even outperforming bespoke deep learning models handcrafted by experts. The area under the precision-recall curve was 0.855. At the 0.5 confidence threshold cut-off, the overall performance metrics were as follows: sensitivity (81.0%), recall (77.1%), accuracy (96.0%) and F1 score (0.79). The per-segment metrics varied across the surgical phases: precision 66.7-100%, recall 46.2-100% and specificity 94.1-100%. Hydrodissection and phacoemulsification were the most accurately predicted phases (100 and 92.31% correct predictions, respectively). During external validation, the average precision was 54.2% (0.00-90.0%), the recall was 61.1% (0.00-100%) and specificity was 96.2% (91.0-99.0%). In conclusion, a code-free AutoML model can classify cataract surgery phases from videos with an accuracy comparable to or better than that of models developed by experts.}, } @article {pmid35161987, year = {2022}, author = {Bal, PK and Mohapatra, SK and Das, TK and Srinivasan, K and Hu, YC}, title = {A Joint Resource Allocation, Security with Efficient Task Scheduling in Cloud Computing Using Hybrid Machine Learning Techniques.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {3}, pages = {}, pmid = {35161987}, issn = {1424-8220}, support = {MOST 110-2622-E-197-009//Ministry of Science and Technology/ ; }, mesh = {Algorithms ; *Cloud Computing ; *Computer Security ; Machine Learning ; Resource Allocation ; }, abstract = {The rapid growth of the cloud computing environment, with clients ranging from personal users to big corporations, has made it challenging for cloud organizations to handle the massive volume of data and the various resources in the cloud. Inefficient management of resources can degrade the performance of cloud computing. Therefore, resources must be evenly allocated to different stakeholders without compromising the organization's profit or users' satisfaction. A customer's request cannot be withheld indefinitely just because the underlying resources are not immediately free.
In this paper, a combined resource allocation and security scheme with efficient task scheduling in cloud computing, based on a hybrid machine learning (RATS-HM) technique, is proposed to overcome these problems. The proposed RATS-HM technique consists of the following: First, an improved cat swarm optimization algorithm-based short scheduler for task scheduling (ICS-TS) minimizes the makespan time and maximizes throughput. Second, a group optimization-based deep neural network (GO-DNN) performs efficient resource allocation using design constraints that include bandwidth and resource load. Third, a lightweight authentication scheme, NSUPREME, is proposed for data encryption to secure data storage. Finally, the proposed RATS-HM technique is simulated with different simulation setups, and the results are compared with state-of-the-art techniques to demonstrate its effectiveness. The results regarding resource utilization, energy consumption, response time, etc., show that the proposed technique is superior to existing ones.}, } @article {pmid35161968, year = {2022}, author = {Fé, I and Matos, R and Dantas, J and Melo, C and Nguyen, TA and Min, D and Choi, E and Silva, FA and Maciel, PRM}, title = {Performance-Cost Trade-Off in Auto-Scaling Mechanisms for Cloud Computing.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {3}, pages = {}, pmid = {35161968}, issn = {1424-8220}, support = {2020R1A6A1A03046811//Basic Science Research Program through the National Research Foundation of Korea(NRF) funded by the Ministry of Education/ ; 2021R1A2C209494311//the National Foundation of Korea (NRF) grant funded by the Korea government (Ministry of Science and ICT (MIST))/ ; 309335/2017-5//Brazilian National Council for Scientific and Technological Development - CNPq/ ; N0002428//'The Competency Development Program for Industry Specialist' of the Korean Ministry of Trade, Industry and Energy (MOTIE), operated by Korea Institute for Advancement of Technology (KIAT)/ ; IITP-2020-2016-0-00465//the MSIT(Ministry of Science, ICT), Korea, under the ITRC(Information Technology Research Center) support program supervised by the IITP(Institute for Information & communications Technology Planning & Evaluation)/ ; }, mesh = {*Algorithms ; *Cloud Computing ; Workload ; }, abstract = {Cloud computing has been widely adopted over the years by practitioners and companies with a variety of requirements. With a strong economic appeal, cloud computing makes possible the idea of computing as a utility, in which computing resources can be consumed and paid for with the same convenience as electricity. One of the main characteristics of cloud as a service is elasticity supported by auto-scaling capabilities. The auto-scaling cloud mechanism allows adjusting resources to meet multiple demands dynamically. The elasticity service is best represented in critical web trading and transaction systems that must satisfy a certain service level agreement (SLA), such as maximum response time limits for different types of inbound requests. Nevertheless, existing cloud infrastructures maintained by different cloud enterprises often offer different cloud service costs for equivalent SLAs, depending on several factors. The factors might be contract types, VM types, auto-scaling configuration parameters, and incoming workload demand. Identifying a combination of parameters that results in SLA compliance directly in the system is often complicated, and manual analysis is prone to errors due to the huge number of possibilities.
This paper proposes the modeling of auto-scaling mechanisms in a typical cloud infrastructure using a stochastic Petri net (SPN) and the employment of a well-established adaptive search metaheuristic (GRASP) to discover critical trade-offs between performance and cost in cloud services. The proposed SPN models enable cloud designers to estimate the metrics of cloud services in accordance with each required SLA, such as the best configuration, cost, system response time, and throughput. The auto-scaling SPN model was extensively validated with 95% confidence against a real test-bed scenario with 18,000 samples. A case study of cloud services was used to investigate the viability of this method and to evaluate the adoptability of the proposed auto-scaling model in practice. On the other hand, the proposed optimization algorithm enables the identification of economical system configurations and parameterizations to satisfy required SLA and budget constraints. The adoption of the metaheuristic GRASP approach and the modeling of auto-scaling mechanisms in this work can help in searching for high-quality solutions and in the operational management of cloud services in practice.}, } @article {pmid35161951, year = {2022}, author = {Jaber, MM and Alameri, T and Ali, MH and Alsyouf, A and Al-Bsheish, M and Aldhmadi, BK and Ali, SY and Abd, SK and Ali, SM and Albaker, W and Jarrar, M}, title = {Remotely Monitoring COVID-19 Patient Health Condition Using Metaheuristics Convolute Networks from IoT-Based Wearable Device Health Data.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {3}, pages = {}, pmid = {35161951}, issn = {1424-8220}, mesh = {*COVID-19 ; Delivery of Health Care ; Humans ; Monitoring, Physiologic ; SARS-CoV-2 ; *Wearable Electronic Devices ; }, abstract = {Today, COVID-19-patient health monitoring and management are major public health challenges for technology. This research monitors COVID-19 patients using the Internet of Things (IoT). IoT-based real-time GPS collection helps alert patients automatically to reduce risk factors. Wearable IoT devices are attached to the human body, interconnected with edge nodes, to investigate data for making health-condition decisions. This system uses the wearable IoT sensor, cloud, and web layers to explore the patient's health condition remotely. Every layer has specific functionality in the COVID-19 symptoms' monitoring process. The first layer collects the patient health information, which is transferred to the second layer that stores that data in the cloud. The network examines health data and alerts the patients, thus helping users take immediate actions. Finally, the web layer notifies family members to take appropriate steps. The optimized deep-learning model supports management and monitoring for further analysis.}, } @article {pmid35161853, year = {2022}, author = {Adee, R and Mouratidis, H}, title = {A Dynamic Four-Step Data Security Model for Data in Cloud Computing Based on Cryptography and Steganography.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {3}, pages = {}, pmid = {35161853}, issn = {1424-8220}, abstract = {Cloud computing is a rapidly expanding field. It allows users to access computer system resources as needed, particularly data storage and computational power, without managing them directly.
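The abstract of the auto-scaling study above (pmid35161968) does not spell out its GRASP configuration, so the following Python sketch shows only the generic shape of such a search: randomized construction followed by local search over a hypothetical parameter grid, with evaluate() standing in for the paper's SPN model:

    import random

    # Hypothetical auto-scaling parameter grid; not from the paper.
    GRID = {"vm_type": ["small", "medium", "large"],
            "scale_threshold": [0.5, 0.7, 0.9],
            "max_replicas": [2, 4, 8]}

    def evaluate(cfg):
        """Placeholder for an SPN-based cost/response-time estimate."""
        return random.random()   # replace with a real model evaluation

    def randomized_construction():
        # A real GRASP would sample each parameter from a restricted
        # candidate list (RCL); uniform sampling keeps the sketch short.
        return {k: random.choice(v) for k, v in GRID.items()}

    def local_search(cfg):
        best, best_score = cfg, evaluate(cfg)
        for k, options in GRID.items():          # single-parameter moves
            for opt in options:
                neighbor = dict(cfg, **{k: opt})
                score = evaluate(neighbor)
                if score < best_score:
                    best, best_score = neighbor, score
        return best, best_score

    best_cfg, best_score = None, float("inf")
    for _ in range(50):                          # GRASP iterations
        cfg, score = local_search(randomized_construction())
        if score < best_score:
            best_cfg, best_score = cfg, score
    print(best_cfg, best_score)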
This paper aims to create a data security model based on cryptography and steganography for data in cloud computing that seeks to reduce existing security and privacy concerns, such as data loss, data manipulation, and data theft. To identify the problem and determine its core cause, we studied various literature on existing cloud computing security models. This study utilizes design science research methodology. The design science research approach includes problem identification, requirements elicitation, artifact design and development, demonstration, and assessment. Design thinking and the Python programming language are used to build the artifact, and its workings are discussed using histograms, tables, and algorithms. This paper's output is a four-step data security model based on Rivest-Shamir-Adleman, Advanced Encryption Standard, and identity-based encryption algorithms alongside Least Significant Bit steganography. The four steps are data protection and security through encryption algorithms, steganography, data backup and recovery, and data sharing. This proposed approach ensures more cloud data redundancy, flexibility, efficiency, and security by protecting data confidentiality, privacy, and integrity from attackers.}, } @article {pmid35161775, year = {2022}, author = {Popović, I and Radovanovic, I and Vajs, I and Drajic, D and Gligorić, N}, title = {Building Low-Cost Sensing Infrastructure for Air Quality Monitoring in Urban Areas Based on Fog Computing.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {3}, pages = {}, pmid = {35161775}, issn = {1424-8220}, support = {451-03-68/2020-14/200223//Ministry of Education, Science and Technological Development of the Republic of Serbia/ ; }, mesh = {*Air Pollution ; *Cloud Computing ; }, abstract = {Because the number of air quality measurement stations governed by a public authority is limited, many methodologies have been developed in order to integrate low-cost sensors and to improve the spatial density of air quality measurements. However, at the large-scale level, the integration of a huge number of sensors brings many challenges. The volume, velocity and processing requirements regarding the management of the sensor life cycle and the operation of system services exceed the capabilities of the centralized cloud model. In this paper, we present the methodology and the architectural framework for building large-scale sensing infrastructure for air quality monitoring applicable in urban scenarios. The proposed tiered architectural solution based on the adopted fog computing model is capable of handling the processing requirements of a large-scale application, while at the same time sustaining real-time performance. Furthermore, the proposed methodology introduces the collection of methods for the management of edge-tier node operation through different phases of the node life cycle, including the methods for node commission, provision, fault detection and recovery. The related sensor-side processing is encapsulated in the form of microservices that reside on the different tiers of the system architecture.
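To make the steganographic step of the four-step model above (pmid35161853) concrete, here is a minimal Least Significant Bit embed/extract sketch in Python; in the paper the payload would already be ciphertext (RSA/AES/IBE), while here it is plain bytes and the cover image is a random stand-in:

    import numpy as np

    def lsb_embed(cover: np.ndarray, payload: bytes) -> np.ndarray:
        # Unpack the payload into bits and write them into the pixel LSBs.
        bits = np.unpackbits(np.frombuffer(payload, dtype=np.uint8))
        if bits.size > cover.size:
            raise ValueError("payload too large for cover image")
        stego = cover.flatten().copy()
        stego[:bits.size] = (stego[:bits.size] & 0xFE) | bits
        return stego.reshape(cover.shape)

    def lsb_extract(stego: np.ndarray, n_bytes: int) -> bytes:
        bits = (stego.flatten()[:n_bytes * 8] & 1).astype(np.uint8)
        return np.packbits(bits).tobytes()

    cover = np.random.randint(0, 256, (64, 64), dtype=np.uint8)  # stand-in image
    stego = lsb_embed(cover, b"secret")
    assert lsb_extract(stego, 6) == b"secret"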
The operation of the system microservices and their collaboration were verified through the presented experimental case study.}, } @article {pmid35161745, year = {2022}, author = {Lăcătușu, F and Ionita, AD and Lăcătușu, M and Olteanu, A}, title = {Performance Evaluation of Information Gathering from Edge Devices in a Complex of Smart Buildings.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {3}, pages = {}, pmid = {35161745}, issn = {1424-8220}, mesh = {*Cloud Computing ; *Computers ; Monitoring, Physiologic ; }, abstract = {The use of monitoring systems based on cloud computing has become common for smart buildings. However, the dilemma of centralization versus decentralization, in terms of gathering information and making the right decisions based on it, remains. Performance, dependent on the system design, does matter for emergency detection, where response time and loading behavior become very important. We studied several design options based on edge computing and containers for a smart building monitoring system that sends alerts to the responsible personnel when necessary. The study evaluated performance, including a qualitative analysis and load testing, for our experimental settings. From 700+ edge nodes, we obtained response times that were 30% lower for the public cloud versus the local solution. For up to 100 edge nodes, the values were better for the latter, and in between, they were rather similar. Based on an interpretation of the results, we developed recommendations for five real-world configurations, and we present the design choices adopted in our development for a complex of smart buildings.}, } @article {pmid35161741, year = {2022}, author = {Kasnesis, P and Doulgerakis, V and Uzunidis, D and Kogias, DG and Funcia, SI and González, MB and Giannousis, C and Patrikakis, CZ}, title = {Deep Learning Empowered Wearable-Based Behavior Recognition for Search and Rescue Dogs.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {3}, pages = {}, pmid = {35161741}, issn = {1424-8220}, support = {833507//European Commission/ ; }, mesh = {Animals ; *Deep Learning ; Dogs ; Neural Networks, Computer ; *Wearable Electronic Devices ; Working Dogs ; }, abstract = {Search and Rescue (SaR) dogs are important assets in the hands of first responders, as they have the ability to locate the victim even in cases where vision and/or sound are limited, due to their inherent olfactory and auditory talents. In this work, we propose a deep-learning-assisted implementation incorporating a wearable device, a base station, a mobile application, and a cloud-based infrastructure that can first monitor in real time the activity, the audio signals, and the location of a SaR dog, and second, recognize and alert the rescuing team whenever the SaR dog spots a victim. For this purpose, we employed deep Convolutional Neural Networks (CNNs) for both activity recognition and sound classification, trained on data from inertial sensors (3-axial accelerometer and gyroscope) and from the wearable's microphone, respectively.
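A minimal sketch of a CNN-based activity classifier of this kind is shown below in Python with Keras; the 2-second/100 Hz window, six inertial channels (3-axis accelerometer plus gyroscope), and four activity classes are assumptions made here, not details from the paper:

    import tensorflow as tf

    window_len, n_channels, n_classes = 200, 6, 4  # assumed window and classes

    model = tf.keras.Sequential([
        tf.keras.layers.Input(shape=(window_len, n_channels)),
        tf.keras.layers.Conv1D(32, kernel_size=5, activation="relu"),
        tf.keras.layers.MaxPooling1D(2),
        tf.keras.layers.Conv1D(64, kernel_size=5, activation="relu"),
        tf.keras.layers.GlobalAveragePooling1D(),
        tf.keras.layers.Dense(n_classes, activation="softmax"),
    ])
    model.compile(optimizer="adam", loss="sparse_categorical_crossentropy",
                  metrics=["accuracy"])
    model.summary()

A model this small is also the kind of network that can plausibly be deployed on a wearable after quantization, which matches the on-device deployment the study reports.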
The developed deep learning models were deployed on the wearable device, while the overall proposed implementation was validated in two discrete search and rescue scenarios, successfully spotting the victim (F1-score above 99%) and informing the rescue team in real time in both scenarios.}, } @article {pmid35161675, year = {2022}, author = {Ometov, A and Molua, OL and Komarov, M and Nurmi, J}, title = {A Survey of Security in Cloud, Edge, and Fog Computing.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {3}, pages = {}, pmid = {35161675}, issn = {1424-8220}, mesh = {*Cloud Computing ; Computer Security ; *Ecosystem ; Privacy ; Surveys and Questionnaires ; }, abstract = {The field of information security and privacy is currently attracting a lot of research interest. Simultaneously, different computing paradigms from Cloud computing to Edge computing are already forming a unique ecosystem with different architectures, storage, and processing capabilities. The heterogeneity of this ecosystem comes with certain limitations, particularly security and privacy challenges. This systematic literature review aims to identify similarities, differences, main attacks, and countermeasures in the various paradigms mentioned. The main outcome points out the essential security and privacy threats. The presented results also outline important similarities and differences in Cloud, Edge, and Fog computing paradigms. Finally, the work identified that the heterogeneity of such an ecosystem poses a great setback to the deployment of security and privacy mechanisms that counter security attacks and privacy leakages. Different deployment techniques were found in the review studies as ways to mitigate security and privacy shortcomings.}, } @article {pmid35161665, year = {2022}, author = {Nabi, S and Ahmad, M and Ibrahim, M and Hamam, H}, title = {AdPSO: Adaptive PSO-Based Task Scheduling Approach for Cloud Computing.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {3}, pages = {}, pmid = {35161665}, issn = {1424-8220}, mesh = {*Algorithms ; *Cloud Computing ; Heuristics ; Industry ; }, abstract = {Cloud computing has emerged as the most favorable computing platform for researchers and industry. Load-balanced task scheduling has emerged as an important and challenging research problem in cloud computing. Swarm intelligence-based meta-heuristic algorithms are considered more suitable for cloud scheduling and load balancing. The optimization procedure of swarm intelligence-based meta-heuristics consists of two major components, local and global search. These algorithms find the best position through the local and global search. To achieve an optimized mapping strategy for tasks to the resources, a balance between local and global search plays an effective role. The inertia weight is an important control attribute to effectively adjust the local and global search process. There are many inertia weight strategies; however, the existing approaches still require fine-tuning to achieve optimum scheduling. The selection of a suitable inertia weight strategy is also an important factor. This paper contributes an adaptive Particle Swarm Optimisation (PSO)-based task scheduling approach that reduces task execution time and increases throughput and the Average Resource Utilization Ratio (ARUR).
Moreover, an adaptive inertia weight strategy, namely Linearly Descending and Adaptive Inertia Weight (LDAIW), is introduced. The proposed scheduling approach provides a better balance between local and global search, leading to optimized task scheduling. The performance of the proposed approach has been evaluated and compared against five renowned PSO-based inertia weight strategies with respect to makespan and throughput. The experiments were then extended to compare the proposed approach against four other renowned meta-heuristic scheduling approaches. Analysis of the simulated experimentation reveals that the proposed approach attained up to 10%, 12% and 60% improvements in makespan, throughput and ARUR, respectively.}, } @article {pmid35161645, year = {2022}, author = {Pincheira, M and Antonini, M and Vecchio, M}, title = {Integrating the IoT and Blockchain Technology for the Next Generation of Mining Inspection Systems.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {3}, pages = {}, pmid = {35161645}, issn = {1424-8220}, mesh = {*Blockchain ; Cloud Computing ; Technology ; }, abstract = {Inspection of mining assets is a crucial part of the maintenance process and is of interest to several stakeholders (e.g., OEMs, owners, users, and inspectors). Inspections require an inspector to verify several characteristics of the assets onsite, typically using legacy and poorly digitized procedures. Thus, many research opportunities arise from the adoption of digital technologies to make these procedures more efficient, reliable, and straightforward. In addition to cloud computing, the ubiquitous presence of modern mobile devices, new measurement tools with embedded connectivity capabilities, and blockchain technologies could greatly improve trust and transparency between the stakeholders interested in the inspection. However, there has been little discussion on integrating these technologies into the mining domain. This paper presents and evaluates an end-to-end system to conduct inspections using mobile devices that directly interact with constrained IoT sensor devices. Furthermore, our proposal provides a method to integrate constrained IoT devices as smart measuring tools that directly interact with a blockchain system, guaranteeing data integrity and increasing the trustworthiness of the data. Finally, we highlight the benefits of our proposed architecture by evaluating a real case study in a mining inspection scenario.}, } @article {pmid35161623, year = {2022}, author = {Qafzezi, E and Bylykbashi, K and Ampririt, P and Ikeda, M and Matsuo, K and Barolli, L}, title = {An Intelligent Approach for Cloud-Fog-Edge Computing SDN-VANETs Based on Fuzzy Logic: Effect of Different Parameters on Coordination and Management of Resources.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {3}, pages = {}, pmid = {35161623}, issn = {1424-8220}, abstract = {The integration of cloud-fog-edge computing in Software-Defined Vehicular Ad hoc Networks (SDN-VANETs) brings a new paradigm that provides the needed resources for supporting a myriad of emerging applications. While an abundance of resources may offer many benefits, it also causes management problems. In this work, we propose an intelligent approach to flexibly and efficiently manage resources in these networks. The proposed approach makes use of an integrated fuzzy logic system that determines the most appropriate resources that vehicles should use when set under various circumstances.
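The AdPSO abstract above (pmid35161665) does not give the exact LDAIW update rule, so the following Python sketch uses the classic linearly descending schedule w(t) = w_max - (w_max - w_min) * t / T as a stand-in inside an otherwise standard PSO loop; all parameter values are illustrative:

    import numpy as np

    def pso(objective, dim=10, n_particles=30, iters=200,
            w_max=0.9, w_min=0.4, c1=2.0, c2=2.0, bounds=(-5.0, 5.0)):
        lo, hi = bounds
        x = np.random.uniform(lo, hi, (n_particles, dim))   # positions
        v = np.zeros_like(x)                                # velocities
        pbest = x.copy()
        pbest_val = np.apply_along_axis(objective, 1, x)
        g = pbest[np.argmin(pbest_val)]                     # global best
        for t in range(iters):
            w = w_max - (w_max - w_min) * t / iters         # descending inertia
            r1, r2 = np.random.rand(*x.shape), np.random.rand(*x.shape)
            v = w * v + c1 * r1 * (pbest - x) + c2 * r2 * (g - x)
            x = np.clip(x + v, lo, hi)
            vals = np.apply_along_axis(objective, 1, x)
            improved = vals < pbest_val
            pbest[improved], pbest_val[improved] = x[improved], vals[improved]
            g = pbest[np.argmin(pbest_val)]
        return g, pbest_val.min()

    best, score = pso(lambda p: float(np.sum(p ** 2)))      # toy sphere function
    print(score)

A high early inertia favors global exploration while the descending schedule shifts the swarm toward local refinement, which is the local/global balance the entry emphasizes.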
These circumstances cover the quality of the network created between the vehicles, its size and longevity, the number of available resources, and the requirements of applications. We evaluated the proposed approach by computer simulations. The results demonstrate the feasibility of the proposed approach in coordinating and managing the available SDN-VANETs resources.}, } @article {pmid35161596, year = {2022}, author = {Yousif, A and Alqhtani, SM and Bashir, MB and Ali, A and Hamza, R and Hassan, A and Tawfeeg, TM}, title = {Greedy Firefly Algorithm for Optimizing Job Scheduling in IoT Grid Computing.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {3}, pages = {}, pmid = {35161596}, issn = {1424-8220}, support = {NU/IFC/ENT/01/013//The deputyship for research and innovation, Ministry of Education in Saudi Arabia/ ; }, abstract = {The Internet of Things (IoT) is defined as interconnected digital and mechanical devices with intelligent and interactive data transmission features over a defined network. The ability of the IoT to collect, analyze and mine data into information and knowledge motivates the integration of IoT with grid and cloud computing. New job scheduling techniques are crucial for the effective integration and management of IoT with grid computing as they provide optimal computational solutions. The computational grid is a modern technology that enables distributed computing to take advantage of an organization's resources in order to handle complex computational problems. However, the scheduling process is considered an NP-hard problem due to the heterogeneity of resources and management systems in the IoT grid. This paper proposes a Greedy Firefly Algorithm (GFA) for job scheduling in the grid environment. In the proposed greedy firefly algorithm, a greedy method is utilized as a local search mechanism to enhance the rate of convergence and the efficiency of schedules produced by the standard firefly algorithm. Several experiments were conducted using the GridSim toolkit to evaluate the proposed greedy firefly algorithm's performance. The study measured several sizes of real grid-computing workload traces: lightweight traces with only 500 jobs, typical traces with 3000 to 7000 jobs, and heavy loads containing 8000 to 10,000 jobs. The experimental results revealed that the greedy firefly algorithm significantly reduces the makespan and execution times of the IoT grid scheduling process as compared to the other evaluated scheduling methods.
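A toy firefly-style scheduler with a greedy local-search pass, in the spirit of the GFA entry above, might look as follows in Python; the job lengths, node speeds, swarm size, and move rule are all invented for illustration:

    import random

    jobs = [random.randint(10, 100) for _ in range(40)]   # job lengths
    speeds = [1.0, 1.5, 2.0]                              # three grid nodes

    def makespan(assign):
        loads = [0.0] * len(speeds)
        for length, node in zip(jobs, assign):
            loads[node] += length / speeds[node]
        return max(loads)

    def greedy_improve(assign):
        # Greedy local search: reassign any job whose move lowers the makespan.
        best = assign[:]
        for i in range(len(jobs)):
            for n in range(len(speeds)):
                cand = best[:i] + [n] + best[i + 1:]
                if makespan(cand) < makespan(best):
                    best = cand
        return best

    # "Fireflies" are candidate assignments; brighter = lower makespan. Each
    # firefly drifts toward a brighter one by copying a gene, then a greedy
    # pass sharpens it (the GFA's local-search idea).
    swarm = [[random.randrange(len(speeds)) for _ in jobs] for _ in range(12)]
    for _ in range(30):
        swarm.sort(key=makespan)
        for k in range(1, len(swarm)):
            target = swarm[random.randrange(k)]           # a brighter firefly
            i = random.randrange(len(jobs))
            swarm[k][i] = target[i]
            swarm[k] = greedy_improve(swarm[k])
    print(makespan(min(swarm, key=makespan)))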
Furthermore, the proposed greedy firefly algorithm converges faster on large search spaces, making it suitable for large-scale IoT grid environments.}, } @article {pmid35161586, year = {2022}, author = {Tassetti, AN and Galdelli, A and Pulcinella, J and Mancini, A and Bolognini, L}, title = {Addressing Gaps in Small-Scale Fisheries: A Low-Cost Tracking System.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {3}, pages = {}, pmid = {35161586}, issn = {1424-8220}, support = {1025515//Interreg V-A Italy-Croatia CBC Programme 2014-2020, Strategic calls for proposals, Project ARGOS - ShARed GOvernance of Sustainable fisheries and aquaculture activities as leverage to protect marine resources in the Adriatic Sea/ ; }, mesh = {Artificial Intelligence ; *Conservation of Natural Resources ; Data Collection ; *Fisheries ; Policy ; }, abstract = {During the last decade vessel-position-recording devices, such as the Vessel Monitoring System and the Automatic Identification System, have increasingly given accurate spatial and quantitative information of industrial fisheries. On the other hand, small-scale fisheries (vessels below 12 m) remain untracked and largely unregulated even though they play an important socio-economic and cultural role in European waters and coastal communities and account for most of the total EU fishing fleet. The typically low technological capacity of these small-scale fishing boats (for which space and power onboard are often limited) as well as their reduced operative range encourage the development of efficient, low-cost, and low-burden tracking solutions. In this context, we designed a cost-effective and scalable prototypic architecture to gather and process positional data from small-scale vessels, making use of a LoRaWAN/cellular network. Data collected by our first installation are presented, as well as its preliminary processing. The emergence of such a low-cost and open-source technology coupled with artificial intelligence could open new opportunities for equipping small-scale vessels, collecting their trajectory data, and estimating their fishing effort (information which has historically not been present). It enables a new monitoring strategy that could effectively include small-scale fleets and support the design of new policies oriented to inform coastal resource and fisheries management.}, } @article {pmid35161458, year = {2022}, author = {Berta, R and Bellotti, F and De Gloria, A and Lazzaroni, L}, title = {Assessing Versatility of a Generic End-to-End Platform for IoT Ecosystem Applications.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {3}, pages = {}, pmid = {35161458}, issn = {1424-8220}, mesh = {*Ecosystem ; }, abstract = {Availability of efficient development tools for data-rich IoT applications is becoming ever more important. Such tools should support cross-platform deployment and seamless and effective applicability in a variety of domains. In this view, we assessed the versatility of an edge-to-cloud system featuring Measurify, a framework for managing smart things. The framework exposes to developers a set of measurement-oriented resources that can be used in different contexts. The tool has been assessed in the development of end-to-end IoT applications in six Electronic and Information Technologies Engineering BSc theses that have highlighted the potential of such a system, both from a didactic and a professional point of view.
The main design abstractions of the system (i.e., generic sensor configuration, simple language with chainable operations for processing data on the edge, seamless WiFi/GSM communication) allowed developers to be productive and focus on the application requirements and the high-level design choices needed to define the edge system (microcontroller and its sensors), avoiding the large set-up times necessary to start a solution from scratch. The experience also highlighted some usability issues that will be addressed in an upcoming release of the system.}, } @article {pmid35143670, year = {2022}, author = {Grealey, J and Lannelongue, L and Saw, WY and Marten, J and Méric, G and Ruiz-Carmona, S and Inouye, M}, title = {The Carbon Footprint of Bioinformatics.}, journal = {Molecular biology and evolution}, volume = {39}, number = {3}, pages = {}, pmid = {35143670}, issn = {1537-1719}, support = {MR/S502443/1/MRC_/Medical Research Council/United Kingdom ; BRC-1215-20014/DH_/Department of Health/United Kingdom ; MR/L003120/1/MRC_/Medical Research Council/United Kingdom ; RG/18/13/33946/BHF_/British Heart Foundation/United Kingdom ; /WT_/Wellcome Trust/United Kingdom ; /CSO_/Chief Scientist Office/United Kingdom ; RG/13/13/30194/BHF_/British Heart Foundation/United Kingdom ; }, mesh = {Algorithms ; *Carbon Footprint ; *Computational Biology ; Genome-Wide Association Study ; Software ; }, abstract = {Bioinformatic research relies on large-scale computational infrastructures which have a nonzero carbon footprint but so far, no study has quantified the environmental costs of bioinformatic tools and commonly run analyses. In this work, we estimate the carbon footprint of bioinformatics (in kilograms of CO2 equivalent units, kgCO2e) using the freely available Green Algorithms calculator (www.green-algorithms.org, last accessed 2022). We assessed 1) bioinformatic approaches in genome-wide association studies (GWAS), RNA sequencing, genome assembly, metagenomics, phylogenetics, and molecular simulations, as well as 2) computation strategies, such as parallelization, CPU (central processing unit) versus GPU (graphics processing unit), cloud versus local computing infrastructure, and geography. In particular, we found that biobank-scale GWAS emitted substantial kgCO2e and simple software upgrades could make it greener, for example, upgrading from BOLT-LMM v1 to v2.3 reduced carbon footprint by 73%. Moreover, switching from the average data center to a more efficient one can reduce carbon footprint by approximately 34%. Memory over-allocation can also be a substantial contributor to an algorithm's greenhouse gas emissions. The use of faster processors or greater parallelization reduces running time but can lead to greater carbon footprint. Finally, we provide guidance on how researchers can reduce power consumption and minimize kgCO2e. 
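A back-of-the-envelope estimate following the general form of the Green Algorithms model referenced above (www.green-algorithms.org) can be written in a few lines of Python; every constant below (per-core power, per-GB memory power, PUE, grid carbon intensity) is illustrative and should be replaced with values for the actual hardware and data center:

    def job_kgco2e(runtime_h, n_cores, core_watts=12.0, cpu_usage=1.0,
                   mem_gb=64, mem_watts_per_gb=0.3725, pue=1.67,
                   carbon_intensity_g_per_kwh=475.0):
        # Power draw: cores (scaled by usage) plus memory, inflated by the
        # data center's power usage effectiveness (PUE).
        power_w = n_cores * core_watts * cpu_usage + mem_gb * mem_watts_per_gb
        energy_kwh = runtime_h * power_w * pue / 1000.0
        return energy_kwh * carbon_intensity_g_per_kwh / 1000.0  # grams -> kg

    # e.g., a hypothetical 96-hour GWAS run on 16 cores with 128 GB of memory:
    print(round(job_kgco2e(96, 16, mem_gb=128), 2), "kgCO2e")

The formula makes the abstract's observations concrete: memory over-allocation (mem_gb) adds emissions even when unused, and moving to a data center with a lower PUE or a greener grid (lower carbon intensity) reduces them proportionally.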
Overall, this work elucidates the carbon footprint of common analyses in bioinformatics and provides solutions which empower a move toward greener research.}, } @article {pmid35140775, year = {2022}, author = {Wang, R and Chen, X}, title = {Research on Agricultural Product Traceability Technology (Economic Value) Based on Information Supervision and Cloud Computing.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {4687639}, pmid = {35140775}, issn = {1687-5273}, mesh = {*Blockchain ; *Cloud Computing ; Information Dissemination ; Reproducibility of Results ; Technology ; }, abstract = {Traditional agricultural product traceability systems adopt centralized storage with a rigid traceability process, which results in low reliability of traceability results and poor system flexibility. Aiming to solve this problem, blockchain technology is applied to supply chain traceability, and a supply chain traceability system based on sidechain technology is proposed. Goods management, information sharing, and product traceability in the supply chain are realized through Ethereum smart contracts. The sidechain technology is adopted to expand Ethereum so that it can meet the needs of practical applications. The experimental results show that the proposed system provides transaction and information-sharing functions. Compared with similar trading systems, the proposed system has more advantages in throughput and security.}, } @article {pmid35136707, year = {2023}, author = {Mahajan, HB and Rashid, AS and Junnarkar, AA and Uke, N and Deshpande, SD and Futane, PR and Alkhayyat, A and Alhayani, B}, title = {Integration of Healthcare 4.0 and blockchain into secure cloud-based electronic health records systems.}, journal = {Applied nanoscience}, volume = {13}, number = {3}, pages = {2329-2342}, pmid = {35136707}, issn = {2190-5509}, abstract = {Over the last decade, cloud-based electronic health records (EHRs) have gained significant attention as enablers of remote patient monitoring. The recent development of Healthcare 4.0, which uses Internet of Things (IoT) components and cloud computing to access medical operations remotely, has gained researchers' attention from a smart-city perspective. Healthcare 4.0 mainly consists of periodic medical data sensing, aggregation, data transmission, data sharing, and data storage. The sensitive, personal nature of patient data poses several challenges in protecting it from hackers. Therefore, storing, accessing, and sharing patient medical information in the cloud requires security attention so that data are not compromised through the authorized-user components of e-healthcare systems. To achieve secure medical data storage, sharing, and access at cloud service providers, several cryptographic algorithms have been designed. However, such conventional solutions fail to balance the requirements of EHR security solutions, such as computational efficiency, server-side verification, user-side verification, operation without a trusted third party, and strong security. Blockchain-based security solutions have gained significant attention in the recent past due to their ability to provide strong security for data storage and sharing with minimal computational effort. Blockchain first drew researchers' attention through bitcoin technology. Utilizing blockchain to secure healthcare records management has been of recent interest.
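A toy hash chain can illustrate the tamper-evidence property that the blockchain-based EHR schemes surveyed in the entry above rely on; the record fields and chain structure below are illustrative, not any specific scheme from the paper:

    import hashlib, json, time

    def make_block(record: dict, prev_hash: str) -> dict:
        # Hash the record together with the previous block's hash.
        body = {"record": record, "prev": prev_hash, "ts": time.time()}
        digest = hashlib.sha256(
            json.dumps(body, sort_keys=True).encode()).hexdigest()
        return {**body, "hash": digest}

    def verify(chain) -> bool:
        for prev, blk in zip(chain, chain[1:]):
            body = {k: blk[k] for k in ("record", "prev", "ts")}
            recomputed = hashlib.sha256(
                json.dumps(body, sort_keys=True).encode()).hexdigest()
            if blk["prev"] != prev["hash"] or blk["hash"] != recomputed:
                return False
        return True

    genesis = make_block({"note": "genesis"}, "0" * 64)
    chain = [genesis,
             make_block({"patient": "P001", "obs": "ECG normal"},
                        genesis["hash"])]
    print(verify(chain))                      # True
    chain[1]["record"]["obs"] = "tampered"
    print(verify(chain))                      # False: tampering is detected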
This paper presents a systematic study of modern blockchain-based solutions for securing medical data with or without cloud computing. We implement and evaluate several of these blockchain-based methods. Based on the studies reviewed, the paper's outcomes are the research gaps, challenges, and a future roadmap to boost emerging Healthcare 4.0 technology.}, } @article {pmid35136669, year = {2022}, author = {Raulerson, CK and Villa, EC and Mathews, JA and Wakeland, B and Xu, Y and Gagan, J and Cantarel, BL}, title = {SCHOOL: Software for Clinical Health in Oncology for Omics Laboratories.}, journal = {Journal of pathology informatics}, volume = {13}, number = {}, pages = {1}, pmid = {35136669}, issn = {2229-5089}, abstract = {Bioinformatics analysis is a key element in the development of in-house next-generation sequencing assays for tumor genetic profiling that can include both tumor DNA and RNA with comparisons to matched-normal DNA in select cases. Bioinformatics analysis encompasses a computationally heavy component that requires a high-performance computing component and an assay-dependent quality assessment, aggregation, and data cleaning component. Although there are free, open-source solutions and fee-for-use commercial services for the computationally heavy component, these solutions and services can lack the options commonly utilized in increasingly complex genomic assays. Additionally, the cost to purchase commercial solutions or implement and maintain open-source solutions can be out of reach for many small clinical laboratories. Here, we present Software for Clinical Health in Oncology for Omics Laboratories (SCHOOL), a collection of genomics analysis workflows that (i) can be easily installed on any platform; (ii) run on the cloud with a user-friendly interface; and (iii) include the detection of single nucleotide variants, insertions/deletions, copy number variants (CNVs), and translocations from RNA and DNA sequencing. These workflows contain elements for customization based on target panel and assay design, including somatic mutational analysis with a matched-normal, microsatellite stability analysis, and CNV analysis with a single nucleotide polymorphism backbone. All of the features of SCHOOL have been designed to run on any computer system, where software dependencies have been containerized. SCHOOL has been built into apps with workflows that can be run on a cloud platform such as DNANexus using their point-and-click graphical interface, which could be automated for high-throughput laboratories.}, } @article {pmid35136576, year = {2021}, author = {Albanese, D and Donati, C}, title = {Large-scale quality assessment of prokaryotic genomes with metashot/prok-quality.}, journal = {F1000Research}, volume = {10}, number = {}, pages = {822}, pmid = {35136576}, issn = {2046-1402}, mesh = {Archaea/genetics ; *Metagenome ; *Metagenomics/methods ; Prokaryotic Cells ; Reproducibility of Results ; }, abstract = {Metagenomic sequencing allows large-scale identification and genomic characterization of unknown bacterial and archaeal species. Binning is the process of recovering genomes from complex mixtures of sequence fragments (metagenome contigs). Assessing the quality of genomes recovered from metagenomes requires the use of complex pipelines involving many independent steps, often difficult to reproduce and maintain.
A comprehensive, automated and easy-to-use computational workflow for the quality assessment of draft prokaryotic genomes, based on container technology, would greatly improve reproducibility and reusability of published results. We present metashot/prok-quality, a container-enabled Nextflow pipeline for quality assessment and genome dereplication. The metashot/prok-quality tool produces genome quality reports that are compliant with the Minimum Information about a Metagenome-Assembled Genome (MIMAG) standard, and can run out-of-the-box on any platform that supports Nextflow, Docker or Singularity, including computing clusters or batch infrastructures in the cloud. metashot/prok-quality is part of the metashot collection of analysis pipelines. Workflow and documentation are available under GPL3 licence on GitHub.}, } @article {pmid35135213, year = {2022}, author = {Habibi, H and Rasoolzadegan, A and Mashmool, A and Band, SS and Chronopoulos, AT and Mosavi, A}, title = {SaaSRec+: a new context-aware recommendation method for SaaS services.}, journal = {Mathematical biosciences and engineering : MBE}, volume = {19}, number = {2}, pages = {1471-1495}, doi = {10.3934/mbe.2022068}, pmid = {35135213}, issn = {1551-0018}, mesh = {*Cloud Computing ; Cluster Analysis ; }, abstract = {Cloud computing is an attractive model that provides users with a variety of services. Thus, the number of cloud services on the market is growing rapidly. Therefore, choosing the proper cloud service is an important challenge. Another major challenge is the availability of diverse cloud services with similar performance, which makes it difficult for users to choose the cloud service that suits their needs. Existing service selection approaches are therefore not able to solve the problem, and cloud service recommendation has become an essential need. In this paper, we present a new approach for context-aware cloud service recommendation. Our proposed method seeks to address weaknesses in user clustering, which stem from 1) a lack of full use of contextual information such as cloud service placement, and 2) an inaccurate method of determining the similarity of two vectors. The evaluation conducted on the WSDream dataset indicates a reduction in the error rate of the cloud service recommendation process. The volume of data used in this paper's evaluation is 5 times that of the baseline method.
Also, according to the t-test, the improvement in service recommendation performance achieved by the proposed method is statistically significant.}, } @article {pmid35131433, year = {2022}, author = {Schirner, M and Domide, L and Perdikis, D and Triebkorn, P and Stefanovski, L and Pai, R and Prodan, P and Valean, B and Palmer, J and Langford, C and Blickensdörfer, A and van der Vlag, M and Diaz-Pier, S and Peyser, A and Klijn, W and Pleiter, D and Nahm, A and Schmid, O and Woodman, M and Zehl, L and Fousek, J and Petkoski, S and Kusch, L and Hashemi, M and Marinazzo, D and Mangin, JF and Flöel, A and Akintoye, S and Stahl, BC and Cepic, M and Johnson, E and Deco, G and McIntosh, AR and Hilgetag, CC and Morgan, M and Schuller, B and Upton, A and McMurtrie, C and Dickscheid, T and Bjaalie, JG and Amunts, K and Mersmann, J and Jirsa, V and Ritter, P}, title = {Brain simulation as a cloud service: The Virtual Brain on EBRAINS.}, journal = {NeuroImage}, volume = {251}, number = {}, pages = {118973}, doi = {10.1016/j.neuroimage.2022.118973}, pmid = {35131433}, issn = {1095-9572}, mesh = {Animals ; Bayes Theorem ; *Brain/diagnostic imaging ; *Cloud Computing ; Computer Simulation ; Humans ; Magnetic Resonance Imaging/methods ; Mice ; Software ; }, abstract = {The Virtual Brain (TVB) is now available as open-source services on the cloud research platform EBRAINS (ebrains.eu). It offers software for constructing, simulating and analysing brain network models including the TVB simulator; magnetic resonance imaging (MRI) processing pipelines to extract structural and functional brain networks; combined simulation of large-scale brain networks with small-scale spiking networks; automatic conversion of user-specified model equations into fast simulation code; simulation-ready brain models of patients and healthy volunteers; Bayesian parameter optimization in epilepsy patient models; data and software for mouse brain simulation; and extensive educational material. TVB cloud services facilitate reproducible online collaboration and discovery of data assets, models, and software embedded in scalable and secure workflows, a precondition for research on large cohort data sets, better generalizability, and clinical translation.}, } @article {pmid35129073, year = {2024}, author = {Bhardwaj, A and Kumar, M and Alshehri, M and Keshta, I and Abugabah, A and Sharma, SK}, title = {Smart water management framework for irrigation in agriculture.}, journal = {Environmental technology}, volume = {45}, number = {12}, pages = {2320-2334}, doi = {10.1080/09593330.2022.2039783}, pmid = {35129073}, issn = {1479-487X}, mesh = {*Drinking Water ; Agriculture ; Farms ; Climate ; Water Supply ; }, abstract = {Global demand for and pressure on natural resources are increasing, most acutely on the availability of pure and safe drinking water. The use of new-age technologies, including smart sensors, embedded devices, and cloud computing, can help deliver efficient and safe management for provisioning drinking water for consumers and irrigation for agriculture. Management actions combined with real-time data gathering, monitoring, alerting, and proactive responses prevent issues from occurring. This research presents a secure and smart framework to enhance the existing irrigation system. This involves a low-budget irrigation model that can provide automated control and meet requirements as per season and climate by using smart device sensors and cloud communications. The authors present four unique algorithms and water management processing rules.
This also includes alerting scenarios for device and component failures and water leakage, with automatic switching to an alternative mode and alert messages about the faults sent so that operational failures can be resolved. The objective of this research is to identify new-age technologies for providing efficient and effective farming methods and to investigate smart IoT-based water management; the highlight of this research is the investigation of IoT water management systems using algorithms for irrigation farming, for which the secure and smart framework described above is presented, with its low-budget irrigation model, season- and climate-driven automated control via smart device sensors and cloud communications, and in-built alerts for failures and leakage.}, } @article {pmid35126905, year = {2022}, author = {Mohana, J and Yakkala, B and Vimalnath, S and Benson Mansingh, PM and Yuvaraj, N and Srihari, K and Sasikala, G and Mahalakshmi, V and Yasir Abdullah, R and Sundramurthy, VP}, title = {Application of Internet of Things on the Healthcare Field Using Convolutional Neural Network Processing.}, journal = {Journal of healthcare engineering}, volume = {2022}, number = {}, pages = {1892123}, pmid = {35126905}, issn = {2040-2309}, mesh = {Algorithms ; Cloud Computing ; Delivery of Health Care ; Humans ; *Internet of Things ; Neural Networks, Computer ; }, abstract = {Populations at risk can benefit greatly from remote health monitoring because it allows for early detection and treatment. Because of recent advances in Internet-of-Things (IoT) paradigms, such monitoring systems are now available everywhere. Due to the essential nature of the patients being monitored, these systems demand a high level of quality in aspects such as availability and accuracy. In health applications, where a lot of data are accessible, deep learning algorithms have the potential to perform well. In this paper, we examine whether a deep learning architecture, the convolutional neural network (CNN), can be implemented in such a system. The study uses an IoT system with a centralised cloud server, which is considered an ideal input data acquisition module. The study uses cloud computing resources by distributing CNN operations to the servers, with outsourced fitness functions performed at the edge. The results of the simulation show that the proposed method achieves a higher rate of classifying the input instances from the data acquisition tools than other methods. From the results, it is seen that the proposed CNN achieves an average accuracy of 99.6% on training datasets and 86.3% on testing datasets.}, } @article {pmid35126501, year = {2022}, author = {Khan, R and Srivastava, AK and Gupta, M and Kumari, P and Kumar, S}, title = {Medicolite-Machine Learning-Based Patient Care Model.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {8109147}, pmid = {35126501}, issn = {1687-5273}, mesh = {*Cloud Computing ; Computer Security ; Delivery of Health Care ; Humans ; *Machine Learning ; Patient Care ; }, abstract = {This paper discusses the effect of machine learning on healthcare and the development of an application named "Medicolite," in which various modules have been developed to help with health-related problems such as diet issues.
It also provides online doctor appointments from home and medication through the phone. A healthcare system is "Smart" when it can decide on its own and can prescribe patients life-saving drugs. Machine learning helps in capturing data that are large and contain sensitive information about the patients, so data security is one of the important aspects of this system. It is a health system that uses trending technologies and the mobile internet to connect people and healthcare institutions and to make them aware of their health condition by intelligently responding to their questions. It perceives information through machine learning and processes this information using cloud computing. With the new technologies, the system decreases manual intervention in healthcare. Every single piece of information is saved in the system, and the user can access it at any time. Furthermore, users can book appointments at any time without standing in a queue. In this paper, the authors propose a CNN-based classifier, which is faster than an SVM-based classifier. When the two classifiers are compared on training and testing sessions, the CNN takes less time (30 seconds) than the SVM (58 seconds).}, } @article {pmid35126487, year = {2022}, author = {Bukhari, MM and Ghazal, TM and Abbas, S and Khan, MA and Farooq, U and Wahbah, H and Ahmad, M and Adnan, KM}, title = {An Intelligent Proposed Model for Task Offloading in Fog-Cloud Collaboration Using Logistics Regression.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {3606068}, pmid = {35126487}, issn = {1687-5273}, mesh = {*Algorithms ; *Cloud Computing ; Computer Simulation ; Logistic Models ; Reproducibility of Results ; }, abstract = {Smart applications and intelligent systems are being developed that are self-reliant, adaptive, and knowledge-based in nature. Emergency and disaster management, aerospace, healthcare, IoT, and mobile applications, among others, are revolutionizing the world of computing. The rapidly growing number of devices in such applications has made the current centralized cloud design impractical. Despite the use of 5G technology, delay-sensitive applications and the cloud cannot operate in parallel, as certain parameters like latency, bandwidth, and response time exceed their threshold values. Middleware proves to be a better solution for coping with these issues while satisfying demanding task-offloading requirements. Fog computing is the middleware recommended in this research article, in view of the fact that it provides services at the edge of the network, so delay-sensitive applications can be served effectively. However, fog nodes contain a limited set of resources and may not be able to process all tasks, especially those of computation-intensive applications. Additionally, fog is not a replacement for the cloud but a supplement to it; both behave like counterparts and offer their services according to task needs, with fog computing in relatively closer proximity to the devices than the cloud. The problem arises when a decision needs to be taken on what to offload (data, computation, or application), where to offload it (fog or cloud), and how much to offload. Fog-cloud collaboration is stochastic in terms of task-related attributes like task size, duration, arrival rate, and required resources.
Dynamic task offloading becomes crucial in order to utilize the resources at the fog and cloud and improve QoS. Since formulating a task offloading policy is complex in nature, this research article addresses the problem and proposes an intelligent task offloading model. Simulation results demonstrate the validity of the proposed logistic regression model, which acquires 86% accuracy compared to other algorithms, and confidence in the predictive task offloading policy by ensuring process consistency and reliability.}, } @article {pmid35125670, year = {2022}, author = {Bacanin, N and Zivkovic, M and Bezdan, T and Venkatachalam, K and Abouhawwash, M}, title = {Modified firefly algorithm for workflow scheduling in cloud-edge environment.}, journal = {Neural computing & applications}, volume = {34}, number = {11}, pages = {9043-9068}, pmid = {35125670}, issn = {0941-0643}, abstract = {Edge computing is a novel technology, closely related to the concept of the Internet of Things. This technology brings computing resources closer to the location where they are consumed by end-users, to the edge of the cloud. In this way, response time is shortened and lower network bandwidth is utilized. Workflow scheduling must be addressed to accomplish these goals. In this paper, we propose an enhanced firefly algorithm adapted for tackling workflow scheduling challenges in a cloud-edge environment. Our proposed approach overcomes observed deficiencies of the original firefly metaheuristic by incorporating genetic operators and a quasi-reflection-based learning procedure. First, we validated the proposed improved algorithm on 10 modern standard benchmark instances and compared its performance with the original and other improved state-of-the-art metaheuristics. Secondly, we performed simulations for a workflow scheduling problem with two objectives: cost and makespan. We performed comparative analysis with other state-of-the-art approaches that were tested under the same experimental conditions. The algorithm proposed in this paper exhibits significant enhancements over the original firefly algorithm and other outstanding metaheuristics in terms of convergence speed and results' quality.
Based on the outputs of the conducted simulations, the proposed improved firefly algorithm obtains prominent results and improves workflow scheduling in the cloud-edge environment by reducing makespan and cost compared with other approaches; a minimal sketch of the core firefly position update appears after this entry.}, } @article {pmid35122132, year = {2022}, author = {Abbas, A and O'Byrne, C and Fu, DJ and Moraes, G and Balaskas, K and Struyven, R and Beqiri, S and Wagner, SK and Korot, E and Keane, PA}, title = {Evaluating an automated machine learning model that predicts visual acuity outcomes in patients with neovascular age-related macular degeneration.}, journal = {Graefe's archive for clinical and experimental ophthalmology = Albrecht von Graefes Archiv fur klinische und experimentelle Ophthalmologie}, volume = {260}, number = {8}, pages = {2461-2473}, pmid = {35122132}, issn = {1435-702X}, support = {MR/T000953/1/MRC_/Medical Research Council/United Kingdom ; MR/T019050/1/MRC_/Medical Research Council/United Kingdom ; R190028A//moorfields eye charity career development award/ ; MR/T019050/1//uk research & innovation future leaders fellowship/ ; }, mesh = {Angiogenesis Inhibitors/therapeutic use ; Humans ; Intravitreal Injections ; Machine Learning ; *Macular Degeneration/drug therapy ; Ranibizumab/therapeutic use ; Retrospective Studies ; Treatment Outcome ; Vascular Endothelial Growth Factor A ; Visual Acuity ; *Wet Macular Degeneration/diagnosis/drug therapy ; }, abstract = {PURPOSE: Neovascular age-related macular degeneration (nAMD) is a major global cause of blindness. Whilst anti-vascular endothelial growth factor (anti-VEGF) treatment is effective, response varies considerably between individuals. Thus, patients face substantial uncertainty regarding their future ability to perform daily tasks. In this study, we evaluate the performance of an automated machine learning (AutoML) model which predicts visual acuity (VA) outcomes in patients receiving treatment for nAMD, in comparison to a manually coded model built using the same dataset. Furthermore, we evaluate model performance across ethnic groups and analyse how the models reach their predictions.
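[Sketch referenced in the firefly workflow-scheduling entry above.] The following is a minimal, self-contained sketch of the canonical firefly position update (attractiveness decaying with squared distance, plus a random perturbation) on a toy sphere-function minimisation. The parameter values and objective are illustrative assumptions only; the paper's modified algorithm additionally uses genetic operators and quasi-reflection-based learning, which are not shown here.

```python
import numpy as np

rng = np.random.default_rng(0)

def sphere(x):
    return np.sum(x ** 2)  # toy objective: lower fitness = brighter firefly

n_fireflies, dim, iters = 15, 5, 50
beta0, gamma, alpha = 1.0, 1.0, 0.2           # attractiveness, light absorption, randomisation (assumed values)
X = rng.uniform(-5, 5, (n_fireflies, dim))    # random initial positions

for _ in range(iters):
    # Brightness is refreshed once per sweep for simplicity.
    fitness = np.array([sphere(x) for x in X])
    for i in range(n_fireflies):
        for j in range(n_fireflies):
            if fitness[j] < fitness[i]:       # firefly j is brighter, so firefly i moves toward it
                r2 = np.sum((X[i] - X[j]) ** 2)
                beta = beta0 * np.exp(-gamma * r2)          # attractiveness decays with distance
                X[i] += beta * (X[j] - X[i]) + alpha * (rng.random(dim) - 0.5)

print("best fitness:", min(sphere(x) for x in X))
```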

METHODS: Binary classification models were trained to predict whether patients' VA would be 'Above' or 'Below' a score of 70 one year after initiating treatment, measured using the Early Treatment Diabetic Retinopathy Study (ETDRS) chart. The AutoML model was built using the Google Cloud Platform, whilst the bespoke model was trained using an XGBoost framework. Models were compared and analysed using the What-if Tool (WIT), a novel model-agnostic interpretability tool.
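As a rough illustration of the kind of bespoke model described in this METHODS section, the sketch below trains a gradient-boosted binary classifier with XGBoost and evaluates it by AUC. The data are synthetic placeholders (the sample count merely echoes the study's 1631 eyes); the features and hyperparameters are assumptions, not the study's actual configuration.

```python
from sklearn.datasets import make_classification
from sklearn.model_selection import train_test_split
from sklearn.metrics import roc_auc_score
from xgboost import XGBClassifier

# Synthetic stand-in for clinical features (e.g., baseline VA, age); not the study's dataset.
X, y = make_classification(n_samples=1631, n_features=10, n_informative=5, random_state=42)
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.2, stratify=y, random_state=42
)

# Binary classifier predicting 'Above' (1) vs 'Below' (0) the VA threshold.
model = XGBClassifier(n_estimators=200, max_depth=3, learning_rate=0.1)
model.fit(X_train, y_train)

# Evaluate by area under the ROC curve, as in the study's model comparison.
probs = model.predict_proba(X_test)[:, 1]
print(f"AUC: {roc_auc_score(y_test, probs):.3f}")
```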

RESULTS: Our study included 1631 eyes from patients attending Moorfields Eye Hospital. The AutoML model (area under the curve [AUC], 0.849) achieved a highly similar performance to the XGBoost model (AUC, 0.847). Using the WIT, we found that the models over-predicted negative outcomes in Asian patients and performed worse in those with an ethnic category of Other. Baseline VA, age and ethnicity were the most important determinants of model predictions. Partial dependence plot analysis revealed a sigmoidal relationship between baseline VA and the probability of an outcome of 'Above'.
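The sigmoidal relationship reported above comes from partial dependence analysis. Below is a minimal hand-rolled sketch of a one-feature partial dependence curve: vary one feature over a grid, overwrite it across the whole dataset, and average the model's predicted probabilities. Data, model, and the chosen feature are placeholders, not the study's pipeline.

```python
import numpy as np
from sklearn.datasets import make_classification
from sklearn.ensemble import GradientBoostingClassifier

X, y = make_classification(n_samples=500, n_features=5, random_state=0)
model = GradientBoostingClassifier(random_state=0).fit(X, y)

feature = 0  # hypothetical stand-in for baseline VA
grid = np.linspace(X[:, feature].min(), X[:, feature].max(), 25)

# Partial dependence: for each grid value, set the feature everywhere
# and average the predicted probability of the positive class.
for v in grid:
    X_mod = X.copy()
    X_mod[:, feature] = v
    print(f"{v:7.3f} -> {model.predict_proba(X_mod)[:, 1].mean():.3f}")
```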

CONCLUSION: We have described and validated an AutoML-WIT pipeline which enables clinicians with minimal coding skills to match the performance of a state-of-the-art algorithm and obtain explainable predictions.}, } @article {pmid35120010, year = {2023}, author = {Wang, Z and Guo, D and Tu, Z and Huang, Y and Zhou, Y and Wang, J and Feng, L and Lin, D and You, Y and Agback, T and Orekhov, V and Qu, X}, title = {A Sparse Model-Inspired Deep Thresholding Network for Exponential Signal Reconstruction-Application in Fast Biological Spectroscopy.}, journal = {IEEE transactions on neural networks and learning systems}, volume = {34}, number = {10}, pages = {7578-7592}, doi = {10.1109/TNNLS.2022.3144580}, pmid = {35120010}, issn = {2162-2388}, mesh = {*Neural Networks, Computer ; *Algorithms ; Tomography, X-Ray Computed/methods ; Spectrum Analysis ; Signal Processing, Computer-Assisted ; Image Processing, Computer-Assisted/methods ; }, abstract = {The nonuniform sampling (NUS) is a powerful approach to enable fast acquisition but requires sophisticated reconstruction algorithms. Faithful reconstruction from partially sampled exponentials is highly expected in general signal processing and many applications. Deep learning (DL) has shown astonishing potential in this field, but many existing problems, such as lack of robustness and explainability, greatly limit its applications. In this work, by combining the merits of the sparse model-based optimization method and data-driven DL, we propose a DL architecture for spectra reconstruction from undersampled data, called MoDern. It follows the iterative reconstruction in solving a sparse model to build the neural network, and we elaborately design a learnable soft-thresholding to adaptively eliminate the spectrum artifacts introduced by undersampling. Extensive results on both synthetic and biological data show that MoDern enables more robust, high-fidelity, and ultrafast reconstruction than the state-of-the-art methods. Remarkably, MoDern has a small number of network parameters and is trained on solely synthetic data while generalizing well to biological data in various scenarios. Furthermore, we extend it to an open-access and easy-to-use cloud computing platform (XCloud-MoDern), contributing a promising strategy for further development of biological applications.}, } @article {pmid35119861, year = {2022}, author = {Rai, BK and Sresht, V and Yang, Q and Unwalla, R and Tu, M and Mathiowetz, AM and Bakken, GA}, title = {TorsionNet: A Deep Neural Network to Rapidly Predict Small-Molecule Torsional Energy Profiles with the Accuracy of Quantum Mechanics.}, journal = {Journal of chemical information and modeling}, volume = {62}, number = {4}, pages = {785-800}, doi = {10.1021/acs.jcim.1c01346}, pmid = {35119861}, issn = {1549-960X}, mesh = {Ligands ; Molecular Dynamics Simulation ; *Neural Networks, Computer ; *Quantum Theory ; Thermodynamics ; }, abstract = {Fast and accurate assessment of small-molecule dihedral energetics is crucial for molecular design and optimization in medicinal chemistry. Yet, accurate prediction of torsion energy profiles remains challenging as the current molecular mechanics (MM) methods are limited by insufficient coverage of drug-like chemical space and accurate quantum mechanical (QM) methods are too expensive. To address this limitation, we introduce TorsionNet, a deep neural network (DNN) model specifically developed to predict small-molecule torsion energy profiles with QM-level accuracy. 
We applied active learning to identify nearly 50k fragments (with elements H, C, N, O, F, S, and Cl) that maximized the coverage of our corporate compound library and leveraged massively parallel cloud computing resources for density functional theory (DFT) torsion scans of these fragments, generating a training data set of 1.2 million DFT energies. After training TorsionNet on this data set, we obtain a model that can rapidly predict the torsion energy profile of typical drug-like fragments with DFT-level accuracy. Importantly, our method also provides an uncertainty estimate for the predicted profiles without any additional calculations. In this report, we show that TorsionNet can accurately identify the preferred dihedral geometries observed in crystal structures. Our TorsionNet-based analysis of a diverse set of protein-ligand complexes with measured binding affinity shows a strong association between high ligand strain and low potency. We also present practical applications of TorsionNet that demonstrate how consideration of DNN-based strain energy leads to substantial improvement in existing lead discovery and design workflows. TorsionNet500, a benchmark data set comprising 500 chemically diverse fragments with DFT torsion profiles (12k MM- and DFT-optimized geometries and energies), has been created and is made publicly available.}, } @article {pmid35118441, year = {2021}, author = {Cassidy, B and Reeves, ND and Pappachan, JM and Gillespie, D and O'Shea, C and Rajbhandari, S and Maiya, AG and Frank, E and Boulton, AJ and Armstrong, DG and Najafi, B and Wu, J and Kochhar, RS and Yap, MH}, title = {The DFUC 2020 Dataset: Analysis Towards Diabetic Foot Ulcer Detection.}, journal = {TouchREVIEWS in endocrinology}, volume = {17}, number = {1}, pages = {5-11}, pmid = {35118441}, issn = {2752-5457}, support = {R01 DK124789/DK/NIDDK NIH HHS/United States ; }, abstract = {Every 20 seconds a limb is amputated somewhere in the world due to diabetes. This is a global health problem that requires a global solution. The International Conference on Medical Image Computing and Computer Assisted Intervention challenge, which concerns the automated detection of diabetic foot ulcers (DFUs) using machine learning techniques, will accelerate the development of innovative healthcare technology to address this unmet medical need. In an effort to improve patient care and reduce the strain on healthcare systems, recent research has focused on the creation of cloud-based detection algorithms. These can be consumed as a service by a mobile app that patients (or a carer, partner or family member) could use themselves at home to monitor their condition and to detect the appearance of a DFU. Collaborative work between Manchester Metropolitan University, Lancashire Teaching Hospitals and the Manchester University NHS Foundation Trust has created a repository of 4,000 DFU images for the purpose of supporting research toward more advanced methods of DFU detection. This paper presents a dataset description and analysis, assessment methods, benchmark algorithms and initial evaluation results. 
It facilitates the challenge by providing useful insights into state-of-the-art and ongoing research.}, } @article {pmid35116076, year = {2022}, author = {Li-Yun, Z and Cheng-Ke, W and Qiang, Z}, title = {The Construction of Folk Sports Featured Towns Based on Intelligent Building Technology Based on the Internet of Things.}, journal = {Applied bionics and biomechanics}, volume = {2022}, number = {}, pages = {4541533}, pmid = {35116076}, issn = {1176-2322}, abstract = {With the emergence of the Internet of Things, its technology and Internet thinking have entered traditional communities and, combined with traditional technologies, have given rise to many new and better management methods and solutions. Among them is the now-familiar concept of intelligent buildings. Based on big data technology, cloud computing technology, and Internet of Things technology, smart buildings provide smart and convenient devices and services for smart device users. Internet of Things technology is entering our lives at an unimaginable speed and has been applied in many fields: it is widely used in smart homes, smart transportation, smart healthcare, smart agriculture, and smart grids. The application of Internet of Things technology to the construction of folk sports characteristic towns is of great significance. The construction of folk sports characteristic towns and the protection of intangible cultural heritage share the same purpose and interoperable elements with the development of traditional cities. From the perspective of protecting folk culture and intangible cultural heritage, it is effective to promote the development of small towns with folk custom characteristics. Based on research on the construction of folk-custom sports towns, this paper proposes a series of data-model analyses and examines the proportion of sports preferences among surveyed volunteers in these towns. The final result shows that ball-game participants accounted for the largest proportion, with 156 people (48.15%). This indicates that about half of the respondents prefer ball sports, suggesting that ball sports should be the mainstay of folk sports towns, supplemented by other sports.}, } @article {pmid35111919, year = {2022}, author = {Mejahed, S and Elshrkawey, M}, title = {A multi-objective algorithm for virtual machine placement in cloud environments using a hybrid of particle swarm optimization and flower pollination optimization.}, journal = {PeerJ. Computer science}, volume = {8}, number = {}, pages = {e834}, pmid = {35111919}, issn = {2376-5992}, abstract = {Demand for virtual machines has increased recently due to the growing number of users and applications. Therefore, virtual machine placement (VMP) is now critical for the provision of efficient resource management in cloud data centers. The VMP process considers the placement of a set of virtual machines onto a set of physical machines, in accordance with a set of criteria. The optimal solution for multi-objective VMP can be determined by using a fitness function that combines the objectives. This paper proposes a novel model to enhance the performance of the VMP decision-making process. Placement decisions are made based on a fitness function that combines three criteria: placement time, power consumption, and resource wastage.
The proposed model aims to satisfy minimum values for the three objectives for placement onto all available physical machines. To optimize the VMP solution, the proposed fitness function was implemented using three optimization algorithms: particle swarm optimization with Lévy flight (PSOLF), flower pollination optimization (FPO), and a proposed hybrid algorithm (HPSOLF-FPO). Each algorithm was tested experimentally. The results of the comparative study between the three algorithms show that the hybrid algorithm has the strongest performance. Moreover, the proposed algorithm was tested against the bin packing best fit strategy. The results show that the proposed algorithm outperforms the best fit strategy in total server utilization.}, } @article {pmid35107425, year = {2022}, author = {Ongadi, B and Lihana, R and Kiiru, J and Ngayo, M and Obiero, G}, title = {An Android-Based Mobile App (ARVPredictor) for the Detection of HIV Drug-Resistance Mutations and Treatment at the Point of Care: Development Study.}, journal = {JMIR formative research}, volume = {6}, number = {2}, pages = {e26891}, pmid = {35107425}, issn = {2561-326X}, abstract = {BACKGROUND: HIV/AIDS remains one of the major global human health challenges, especially in resource-limited environments. By 2017, over 77.3 million people were infected with the disease, and approximately 35.4 million individuals had already died from AIDS-related illnesses. Approximately 21.7 million people were accessing ART with significant clinical outcomes. However, numerous challenges are experienced in the delivery and accurate interpretation of data on patients with HIV data by various health care providers at different care levels. Mobile health (mHealth) technology is progressively making inroads into the health sector as well as medical research. Different mobile devices have become common in health care settings, leading to rapid growth in the development of downloadable software specifically designed to fulfill particular health-related purposes.

OBJECTIVE: We developed a mobile-based app called ARVPredictor and demonstrated that it can accurately define HIV-1 drug-resistance mutations in the HIV pol gene for use at the point of care.

METHODS: ARVPredictor was designed using Android Studio with Java as the programming language and is compatible with both Android and iOS. The app system is hosted on an Nginx server, and network calls are built on PHP's Laravel framework, handled by the Retrofit library. DigitalOcean offers a high-performance and stable cloud computing platform for ARVPredictor. The mobile app is listed in the Google Play Store as "ARVPredictor", and the source code is available under the permissive MIT license in a GitHub repository. To test for agreement between ARVPredictor and the Stanford HIV Database in detecting HIV subtypes and NNRTI and NRTI mutations, a total of 100 known HIV sequences were evaluated.

RESULTS: The mobile-based app (ARVPredictor) takes in a set of sequences or known mutations (protease, reverse transcriptase and integrase). It then returns inferred levels of resistance to selected nucleoside, nonnucleoside, protease, and integrase inhibitors for accurate HIV/AIDS management at the point of care. ARVPredictor identified the same HIV subtypes as the Stanford HIV Database in 98/100 sequences (κ=0.98, indicating near-perfect agreement). In 89/100 cases, the major NNRTI and NRTI mutations identified by ARVPredictor matched the Stanford HIV Database (κ=0.89, indicating near-perfect agreement). Eight mutations classified as major by the Stanford HIV Database were classified as others by ARVPredictor.
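A minimal sketch of how this kind of tool-versus-reference agreement statistic can be computed, using scikit-learn's Cohen's kappa. The label arrays here are fabricated placeholders standing in for the two tools' subtype calls, not the study's data.

```python
from sklearn.metrics import cohen_kappa_score

# Hypothetical subtype calls from two tools on the same 10 sequences
# (placeholders only; the study evaluated 100 known HIV sequences).
calls_arvpredictor = ["A1", "B", "C", "C", "B", "A1", "D", "B", "C", "B"]
calls_stanford_db  = ["A1", "B", "C", "C", "B", "A1", "D", "B", "C", "C"]

kappa = cohen_kappa_score(calls_arvpredictor, calls_stanford_db)
print(f"Cohen's kappa: {kappa:.2f}")  # values above ~0.8 are conventionally read as near-perfect agreement
```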

CONCLUSIONS: ARVPredictor largely agrees with the Stanford HIV Database in identifying both major and minor protease, reverse transcriptase, and integrase mutations. The app can be used conveniently and robustly at the point of care by HIV/AIDS care providers to improve the management of HIV infection.}, } @article {pmid35106098, year = {2022}, author = {Xing, H and Zhu, L and Chen, B and Niu, J and Li, X and Feng, Y and Fang, W}, title = {Spatial and temporal changes analysis of air quality before and after the COVID-19 in Shandong Province, China.}, journal = {Earth science informatics}, volume = {15}, number = {2}, pages = {863-876}, pmid = {35106098}, issn = {1865-0473}, abstract = {Due to the COVID-19 pandemic outbreak, the home quarantine policy was implemented to control the spread of the pandemic, which may have had a positive impact on the improvement of air quality in China. In this study, the Google Earth Engine (GEE) cloud computing platform was used to obtain CO, NO2, SO2 and aerosol optical depth (AOD) data from December 2018-March 2019, December 2019-March 2020, and December 2020-March 2021 in Shandong Province. These data were used to study the spatial and temporal distribution of air quality changes in Shandong Province before and after the pandemic and to analyze the reasons for the changes. The results show that: (1) Compared with the same period, CO and NO2 showed a decreasing trend from December 2019 to March 2020, with average total changes of 4082.36 mol/m[2] and 167.25 mol/m[2], and average total change rates of 4.80% and 38.11%, respectively. SO2 did not show a significant decrease. This is inextricably linked to the reduction of human travel and production activities following the implementation of the home quarantine policy. (2) The spatial and temporal variation of AOD was similar to that of the pollutants, but showed a significant increase in January 2020, with an average total increase of 1.69 × 10[7], up about 2.54%, from December 2019 to March 2020. This is attributed to urban heating and the reduction of pollutants such as NOx. (3) Pollutants and AOD were significantly correlated with meteorological data (e.g., average temperature, average humidity, average wind speed, average precipitation, etc.). This study provides data support for atmospheric protection and air quality monitoring in Shandong Province, as well as a theoretical basis and technical guidance for policy formulation and urban planning.}, } @article {pmid35096141, year = {2022}, author = {Shenghua, Z and Bader, H and Jue, C}, title = {A Dynamic Equilibrium Mechanism of Core Layer Interests in the Mobile Medical Platform Ecosystem.}, journal = {Applied bionics and biomechanics}, volume = {2022}, number = {}, pages = {8915055}, pmid = {35096141}, issn = {1176-2322}, abstract = {In recent years, with the development of the mobile Internet, big data, and cloud computing, mobile medical platforms such as Ding Xiang Yuan, aggregating platform ecological resources, have played an irreplaceable role in improving efficiency, optimizing resource allocation, and even promoting the transformation and upgrading of the medical industry. Despite all this, most mobile medical platforms in China still face many problems, including immature business models, stagnation of the interaction of knowledge and information among platform members, and weak platform competitiveness.
Based on a review of the platform and commercial ecosystems, this paper adopts the evolutionary game method and simulation to analyze the evolutionary stability strategy of operators, partners, and users in the core layer of the platform during preflow and postflow periods of a mobile medical platform, hence, to construct a beneficial dynamic equilibrium model of a platform business ecosystem under the optimal decisions made by all parties involved in the platform: the goal in the early stage (preflow period) is to increase platform user flow. Hence, the knowledge/information sharing of platform users is needed to enhance platform's visibility. While in the late period (postflow period), when the platform user flow reaches a certain scale, platform's goal is to promote revenue, which relies mainly on the pricing strategy. It is critical to promote the stability of the platform and the dynamic balance of interests at the core layer in the pricing process. This paper applies the platform business ecosystem theory and the evolutionary game theory to mobile medical platform development, contributing theoretically and practically in the following: (1) providing a more solid theoretical support for the mobile medical platform research and enriching the theoretical framework of the platform business ecosystem; (2) proposing the dynamic equilibrium model based on the optimal decisions of the platform core layers, which help to reveal the inherent law of the evolution of the mobile medical platform; (3) providing policy suggestions and management implications in constructing an appropriate business ecosystem and achieving sustainable development in mobile medical platforms.}, } @article {pmid35096132, year = {2022}, author = {Mustafa, M and Alshare, M and Bhargava, D and Neware, R and Singh, B and Ngulube, P}, title = {Perceived Security Risk Based on Moderating Factors for Blockchain Technology Applications in Cloud Storage to Achieve Secure Healthcare Systems.}, journal = {Computational and mathematical methods in medicine}, volume = {2022}, number = {}, pages = {6112815}, pmid = {35096132}, issn = {1748-6718}, mesh = {Adult ; *Blockchain/standards/statistics & numerical data ; COVID-19/epidemiology ; Cloud Computing/standards/statistics & numerical data ; Computational Biology ; *Computer Security/standards/statistics & numerical data ; Computer Simulation ; *Delivery of Health Care/standards/statistics & numerical data ; *Electronic Health Records/standards/statistics & numerical data ; Female ; Humans ; Male ; Middle Aged ; Pandemics ; Privacy ; SARS-CoV-2 ; Surveys and Questionnaires ; Young Adult ; }, abstract = {Due to the high amount of electronic health records, hospitals have prioritized data protection. Because it uses parallel computing and is distributed, the security of the cloud cannot be guaranteed. Because of the large number of e-health records, hospitals have made data security a major concern. The cloud's security cannot be guaranteed because it uses parallel processing and is distributed. The blockchain (BC) has been deployed in the cloud to preserve and secure medical data because it is particularly prone to security breaches and attacks such as forgery, manipulation, and privacy leaks. An overview of blockchain (BC) technology in cloud storage to improve healthcare system security can be obtained by reading this paper. First, we will look at the benefits and drawbacks of using a basic cloud storage system. 
After that, a brief overview of blockchain cloud storage technology will be offered. Many studies have focused on using blockchain technology in healthcare systems as a possible solution to the security concerns in healthcare, resulting in tighter and more advanced security requirements being provided. This survey could lead to a blockchain-based solution for the protection of cloud-outsourced healthcare data. Evaluation and comparison of the simulation tests of the surveyed blockchain technology-focused studies can demonstrate integrity verification with cloud storage and medical data, data interchange with reduced computational complexity, security, and privacy protection. Because of blockchain and IT, business warfare has emerged, and governments in the Middle East have embraced it. Thus, this research focused on the qualities that influence customers' interest in and approval of blockchain technology in cloud storage for healthcare system security, and on the aspects that increase people's knowledge of blockchain. One way to better understand how people feel about learning to use blockchain technology in healthcare is through the Unified Theory of Acceptance and Use of Technology (UTAUT). A snowball sampling method was used to select respondents for an online poll to gather data about blockchain technology in poor Middle Eastern countries. A total of 443 randomly selected responses were tested using SPSS. Blockchain adoption and acceptance during the COVID-19 pandemic were found to be influenced by anticipation, effort expectancy, social influence (SI), facilitating conditions, personal innovativeness (PInn), and perceived security risk (PSR); the study also provides an overview of current trends in the field and of issues pertaining to significance and compatibility.}, } @article {pmid35095192, year = {2022}, author = {Elawady, M and Sarhan, A and Alshewimy, MAM}, title = {Toward a mixed reality domain model for time-Sensitive applications using IoE infrastructure and edge computing (MRIoEF).}, journal = {The Journal of supercomputing}, volume = {78}, number = {8}, pages = {10656-10689}, pmid = {35095192}, issn = {0920-8542}, abstract = {Mixed reality (MR) is one of the technologies with many challenges in the design and implementation phases, especially the problems associated with time-sensitive applications. The main objective of this paper is to introduce a conceptual model for MR applications that gives them a new layer of interactivity by using Internet of Things/Internet of Everything models, which provide an improved quality of experience for end-users. The model supports the cloud and fog compute layers to provide functionalities that need more processing resources and to reduce latency problems for time-sensitive applications. Validation of the proposed model is performed by demonstrating a prototype of the model applied to a real-time case study and discussing how to enable standard technologies for the various components in the model.
Moreover, it shows the applicability of the model, the ease of defining the roles, and the coherence of data or processes found in the most common applications.}, } @article {pmid35087583, year = {2022}, author = {M Abd El-Aziz, R and Alanazi, R and R Shahin, O and Elhadad, A and Abozeid, A and I Taloba, A and Alshalabi, R}, title = {An Effective Data Science Technique for IoT-Assisted Healthcare Monitoring System with a Rapid Adoption of Cloud Computing.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {7425846}, pmid = {35087583}, issn = {1687-5273}, mesh = {*Cloud Computing ; Data Science ; Delivery of Health Care ; Electrocardiography ; Humans ; *Internet of Things ; }, abstract = {Patients are required to be observed and treated continually in some emergency situations. However, due to time constraints, visiting the hospital to execute such tasks is challenging. This can be achieved using a remote healthcare monitoring system. The proposed system introduces an effective data science technique for IoT supported healthcare monitoring system with the rapid adoption of cloud computing that enhances the efficiency of data processing and the accessibility of data in the cloud. Many IoT sensors are employed, which collect real healthcare data. These data are retained in the cloud for the processing of data science. In the Healthcare Monitoring-Data Science Technique (HM-DST), initially, an altered data science technique is introduced. This algorithm is known as the Improved Pigeon Optimization (IPO) algorithm, which is employed for grouping the stored data in the cloud, which helps in improving the prediction rate. Next, the optimum feature selection technique for extraction and selection of features is illustrated. A Backtracking Search-Based Deep Neural Network (BS-DNN) is utilized for classifying human healthcare. 
The proposed system's performance is finally examined on various real-time healthcare datasets, and the results are compared with those of available smart healthcare monitoring systems.}, } @article {pmid35085445, year = {2022}, author = {Verdu, E and Nieto, YV and Saleem, N}, title = {Call for Special Issue Papers: Cloud Computing and Big Data for Cognitive IoT.}, journal = {Big data}, volume = {10}, number = {1}, pages = {83-84}, doi = {10.1089/big.2021.29048.cfp2}, pmid = {35085445}, issn = {2167-647X}, } @article {pmid35082445, year = {2022}, author = {Edgar, RC and Taylor, B and Lin, V and Altman, T and Barbera, P and Meleshko, D and Lohr, D and Novakovsky, G and Buchfink, B and Al-Shayeb, B and Banfield, JF and de la Peña, M and Korobeynikov, A and Chikhi, R and Babaian, A}, title = {Petabase-scale sequence alignment catalyses viral discovery.}, journal = {Nature}, volume = {602}, number = {7895}, pages = {142-147}, pmid = {35082445}, issn = {1476-4687}, mesh = {Animals ; Archives ; Bacteriophages/enzymology/genetics ; Biodiversity ; *Cloud Computing ; Coronavirus/classification/enzymology/genetics ; *Databases, Genetic ; Evolution, Molecular ; Hepatitis Delta Virus/enzymology/genetics ; Humans ; Models, Molecular ; RNA Viruses/classification/enzymology/*genetics/*isolation & purification ; RNA-Dependent RNA Polymerase/chemistry/genetics ; Sequence Alignment/*methods ; Software ; Virology/*methods ; Virome/*genetics ; }, abstract = {Public databases contain a planetary collection of nucleic acid sequences, but their systematic exploration has been inhibited by a lack of efficient methods for searching this corpus, which (at the time of writing) exceeds 20 petabases and is growing exponentially[1]. Here we developed a cloud computing infrastructure, Serratus, to enable ultra-high-throughput sequence alignment at the petabase scale. We searched 5.7 million biologically diverse samples (10.2 petabases) for the hallmark gene RNA-dependent RNA polymerase and identified well over 10[5] novel RNA viruses, thereby expanding the number of known species by roughly an order of magnitude. We characterized novel viruses related to coronaviruses, hepatitis delta virus and huge phages, respectively, and analysed their environmental reservoirs. To catalyse the ongoing revolution of viral discovery, we established a free and comprehensive database of these data and tools. Expanding the known sequence diversity of viruses can reveal the evolutionary origins of emerging pathogens and improve pathogen surveillance for the anticipation and mitigation of future pandemics.}, } @article {pmid35079199, year = {2022}, author = {Hassan, MR and Ismail, WN and Chowdhury, A and Hossain, S and Huda, S and Hassan, MM}, title = {A framework of genetic algorithm-based CNN on multi-access edge computing for automated detection of COVID-19.}, journal = {The Journal of supercomputing}, volume = {78}, number = {7}, pages = {10250-10274}, pmid = {35079199}, issn = {0920-8542}, abstract = {This paper designs and develops a computational intelligence-based framework using a convolutional neural network (CNN) and a genetic algorithm (GA) to detect COVID-19 cases. The framework utilizes multi-access edge computing technology such that end-users can access available resources as well as the CNN on the cloud. Early detection of COVID-19 can improve treatment and mitigate transmission.
During peaks of infection, hospitals worldwide have suffered from heavy patient loads, bed shortages, inadequate testing kits and short-staffing problems. Due to the time-consuming nature of the standard RT-PCR test, the lack of expert radiologists, and evaluation issues relating to poor-quality images, patients with severe conditions are sometimes unable to receive timely treatment. It is thus recommended to incorporate computational intelligence methodologies, which provide highly accurate detection in a matter of minutes, alongside traditional testing as an emergency measure. CNNs have achieved extraordinary performance in numerous computational intelligence tasks. However, finding a systematic, automatic and optimal set of hyperparameters for building an efficient CNN for complex tasks remains challenging. Moreover, due to the advancement of technology, data are collected at sparse locations, and accumulating data from such diverse, sparse locations poses a challenge. In this article, we propose a computational intelligence-based framework that utilizes recent 5G multi-access edge computing technology along with a new CNN model for automatic COVID-19 detection using raw chest X-ray images. This framework suggests that anyone with a 5G device (e.g., a 5G mobile phone) should be able to use the CNN-based automatic COVID-19 detection tool. As part of the proposed automated model, the model introduces a novel CNN structure with the genetic algorithm (GA) for hyperparameter tuning. Such a combination of GA and CNN is new in the application of COVID-19 detection/classification. The experimental results show that the developed framework could classify COVID-19 X-ray images with 98.48% accuracy, which is higher than any of the performances achieved by other studies.}, } @article {pmid35079189, year = {2022}, author = {Nezami, M and Tuli, KR and Dutta, S}, title = {Shareholder wealth implications of software firms' transition to cloud computing: a marketing perspective.}, journal = {Journal of the Academy of Marketing Science}, volume = {50}, number = {3}, pages = {538-562}, pmid = {35079189}, issn = {0092-0703}, abstract = {Moving into cloud computing represents a major marketing shift because it replaces on-premises offerings requiring large, up-front payments with hosted computing resources made available on-demand on a pay-per-use pricing scheme. However, little is known about the effect of this shift on cloud vendors' financial performance. This study draws on a longitudinal data set of 435 publicly listed business-to-business (B2B) firms within the computer software and services industries to investigate, from the vendors' perspective, the shareholder wealth effect of transitioning to the cloud. Using a value relevance model, we find that an unanticipated increase in the cloud ratio (i.e., the share of a firm's revenues from cloud computing) has a positive and significant effect on excess stock returns; and it has a negative and significant effect on idiosyncratic risk. Yet these effects vary across market structures and firms. In particular, unanticipated increases in market maturity intensify the positive effect of moving into the cloud on excess stock returns.
Further, unexpected increases in advertising intensity strengthen the negative effect of shifting to the cloud on idiosyncratic risk.}, } @article {pmid35076283, year = {2023}, author = {Badshah, A and Jalal, A and Farooq, U and Rehman, GU and Band, SS and Iwendi, C}, title = {Service Level Agreement Monitoring as a Service: An Independent Monitoring Service for Service Level Agreements in Clouds.}, journal = {Big data}, volume = {11}, number = {5}, pages = {339-354}, doi = {10.1089/big.2021.0274}, pmid = {35076283}, issn = {2167-647X}, mesh = {*Artificial Intelligence ; Computer Simulation ; *Cloud Computing ; Internet ; Commerce ; }, abstract = {The cloud network is rapidly growing due to a massive increase in interconnected devices and the emergence of different technologies such as the Internet of Things, fog computing, and artificial intelligence. In response, cloud computing needs reliable dealings among the service providers, brokers, and consumers. The existing cloud monitoring frameworks such as Amazon Cloud Watch, Paraleap Azure Watch, and Rack Space Cloud Kick work under the control of service providers. They work fine; however, this may create dissatisfaction among customers over Service Level Agreement (SLA) violations. Customers' dissatisfaction may drastically reduce the business of service providers. To cope with the aforementioned issue and align with the cloud philosophy, a completely independent Monitoring as a Service (MaaS) is needed for observing and regulating cloud businesses. However, the existing MaaS frameworks do not address comprehensive SLAs for customer satisfaction and penalties management. This article proposes a reliable framework for monitoring the provider's services by adopting third-party monitoring services with clear-cut SLA and penalties management. Since this framework monitors the SLA as a cloud monitoring service, it is named SLA-MaaS. On violations, it penalizes those found in breach of the terms and conditions listed in the SLA. Simulation results confirmed that the proposed framework adequately satisfies the customers (as well as service providers).
This helps in developing a trustworthy relationship among cloud partners and increases customer attention and retention.}, } @article {pmid35070169, year = {2022}, author = {Pezoulas, VC and Goules, A and Kalatzis, F and Chatzis, L and Kourou, KD and Venetsanopoulou, A and Exarchos, TP and Gandolfo, S and Votis, K and Zampeli, E and Burmeister, J and May, T and Marcelino Pérez, M and Lishchuk, I and Chondrogiannis, T and Andronikou, V and Varvarigou, T and Filipovic, N and Tsiknakis, M and Baldini, C and Bombardieri, M and Bootsma, H and Bowman, SJ and Soyfoo, MS and Parisis, D and Delporte, C and Devauchelle-Pensec, V and Pers, JO and Dörner, T and Bartoloni, E and Gerli, R and Giacomelli, R and Jonsson, R and Ng, WF and Priori, R and Ramos-Casals, M and Sivils, K and Skopouli, F and Torsten, W and A G van Roon, J and Xavier, M and De Vita, S and Tzioufas, AG and Fotiadis, DI}, title = {Addressing the clinical unmet needs in primary Sjögren's Syndrome through the sharing, harmonization and federated analysis of 21 European cohorts.}, journal = {Computational and structural biotechnology journal}, volume = {20}, number = {}, pages = {471-484}, pmid = {35070169}, issn = {2001-0370}, abstract = {For many decades, the clinical unmet needs of primary Sjögren's Syndrome (pSS) have been left unresolved due to the rareness of the disease and the complexity of the underlying pathogenic mechanisms, including the pSS-associated lymphomagenesis process. Here, we present the HarmonicSS cloud-computing exemplar which offers beyond the state-of-the-art data analytics services to address the pSS clinical unmet needs, including the development of lymphoma classification models and the identification of biomarkers for lymphomagenesis. The users of the platform have been able to successfully interlink, curate, and harmonize 21 regional, national, and international European cohorts of 7,551 pSS patients with respect to the ethical and legal issues for data sharing. Federated AI algorithms were trained across the harmonized databases, with reduced execution time complexity, yielding robust lymphoma classification models with 85% accuracy, 81.25% sensitivity, 85.4% specificity along with 5 biomarkers for lymphoma development. To our knowledge, this is the first GDPR compliant platform that provides federated AI services to address the pSS clinical unmet needs.}, } @article {pmid35069719, year = {2022}, author = {Li, W}, title = {Big Data Precision Marketing Approach under IoT Cloud Platform Information Mining.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {4828108}, pmid = {35069719}, issn = {1687-5273}, mesh = {*Big Data ; *Cloud Computing ; Data Mining ; Humans ; Marketing ; Technology ; }, abstract = {In this article, an in-depth study and analysis of the precision marketing approach are carried out by building an IoT cloud platform and then using the technology of big data information mining. The cloud platform uses the MySQL database combined with the MongoDB database to store the cloud platform data to ensure the correct storage of data as well as to improve the access speed of data. The storage method of IoT temporal data is optimized, and the way of storing data in time slots is used to improve the efficiency of reading large amounts of data. For the scalability of the IoT data storage system, a MongoDB database clustering scheme is designed to ensure the scalability of data storage and disaster recovery capability. 
The relevant theories of big data marketing are reviewed and analyzed; secondly, based on the relevant theories, combined with the author's work experience and relevant information, a comprehensive analysis and research on the current situation of big data marketing are conducted, focusing on its macro-, micro-, and industry environment. The service model combines the types of user needs, encapsulates the resources obtained by the alliance through data mining for service products, and publishes and delivers them in the form of data products. From the perspective of the development of the telecommunications industry, in terms of technology, the telecommunications industry has seen the development trend of mobile replacing fixed networks and triple play. The development of emerging technologies represented by the Internet of Things and cloud computing has also led to technological changes in the telecommunications industry. Operators are facing new development opportunities and challenges. It also divides the service mode into self-service and consulting service mode according to the different degrees of users' cognition and understanding of the service, as well as proposes standardized data mining service guarantee from two aspects: after-sales service and operation supervision. A customized data mining service is a kind of data mining service for users' personalized needs. And the intelligent data mining service guarantee is proposed from two aspects of multicase experience integration and group intelligence. In the empirical research part, the big data alliance in Big Data Industry Alliance, which provides data mining service as the main business, is selected as the research object, and the data mining service model of the big data alliance proposed in this article is applied to the actual alliance to verify the scientific and rationality of the data mining service model and improve the data mining service model management system.}, } @article {pmid35064372, year = {2022}, author = {Egger, J and Wild, D and Weber, M and Bedoya, CAR and Karner, F and Prutsch, A and Schmied, M and Dionysio, C and Krobath, D and Jin, Y and Gsaxner, C and Li, J and Pepe, A}, title = {Studierfenster: an Open Science Cloud-Based Medical Imaging Analysis Platform.}, journal = {Journal of digital imaging}, volume = {35}, number = {2}, pages = {340-355}, pmid = {35064372}, issn = {1618-727X}, support = {KLI 678/FWF_/Austrian Science Fund FWF/Austria ; }, mesh = {*Cloud Computing ; Humans ; *Image Processing, Computer-Assisted ; Magnetic Resonance Imaging ; Neural Networks, Computer ; Tomography, X-Ray Computed ; }, abstract = {Imaging modalities such as computed tomography (CT) and magnetic resonance imaging (MRI) are widely used in diagnostics, clinical studies, and treatment planning. Automatic algorithms for image analysis have thus become an invaluable tool in medicine. Examples of this are two- and three-dimensional visualizations, image segmentation, and the registration of all anatomical structure and pathology types. In this context, we introduce Studierfenster (www.studierfenster.at): a free, non-commercial open science client-server framework for (bio-)medical image analysis. Studierfenster offers a wide range of capabilities, including the visualization of medical data (CT, MRI, etc.) in two-dimensional (2D) and three-dimensional (3D) space in common web browsers, such as Google Chrome, Mozilla Firefox, Safari, or Microsoft Edge. 
Other functionalities are the calculation of medical metrics (Dice score and Hausdorff distance), manual slice-by-slice outlining of structures in medical images, manual placing of (anatomical) landmarks in medical imaging data, visualization of medical data in virtual reality (VR), and facial reconstruction and registration of medical data for augmented reality (AR). More sophisticated features include automatic cranial implant design with a convolutional neural network (CNN), the inpainting of aortic dissections with a generative adversarial network, and a CNN for automatic aortic landmark detection in CT angiography images. A user study with medical and non-medical experts in medical image analysis was performed to evaluate the usability and the manual functionalities of Studierfenster. When participants were asked about their overall impression of Studierfenster in an ISO standard (ISO-Norm) questionnaire, a mean of 6.3 out of 7.0 possible points was achieved. The evaluation also provided insights into the results achievable with Studierfenster in practice, by comparing these with two ground truth segmentations performed by a physician of the Medical University of Graz in Austria. In this contribution, we presented an online environment for (bio-)medical image analysis. In doing so, we established a client-server-based architecture, which is able to process medical data, especially 3D volumes. Our online environment is not limited to medical applications for humans. Rather, its underlying concept could be interesting for researchers from other fields, in applying the already existing functionalities or future additional implementations of further image processing applications. An example could be the processing of medical acquisitions like CT or MRI from animals [Clinical Pharmacology & Therapeutics, 84(4):448-456, 68], which are becoming more common as veterinary clinics and centers are increasingly equipped with such imaging devices. Furthermore, applications in entirely non-medical research in which images/volumes need to be processed are also conceivable, such as those in optical measuring techniques, astronomy, or archaeology.}, } @article {pmid35062619, year = {2022}, author = {Avgeris, M and Spatharakis, D and Dechouniotis, D and Leivadeas, A and Karyotis, V and Papavassiliou, S}, title = {ENERDGE: Distributed Energy-Aware Resource Allocation at the Edge.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {2}, pages = {}, pmid = {35062619}, issn = {1424-8220}, support = {CHIST-ERA-18-SDCDN-003//CHIST-ERA-2018-DRUID-NET project/ ; }, abstract = {Mobile applications are progressively becoming more sophisticated and complex, increasing their computational requirements. Traditional offloading approaches that use exclusively the Cloud infrastructure are now deemed unsuitable due to the inherent associated delay. Edge Computing can address most of the Cloud limitations at the cost of limited available resources. This bottleneck necessitates an efficient allocation of offloaded tasks from the mobile devices to the Edge. In this paper, we consider a task offloading setting with applications of different characteristics and requirements, and propose an optimal resource allocation framework leveraging the amalgamation of the edge resources.
To balance the trade-offs among low total energy consumption, end-to-end delay requirements, and load balancing at the Edge, we additionally introduce a Markov Random Field-based mechanism for distributing the excess workload. The proposed approach investigates a realistic scenario, including different categories of mobile applications, edge devices with different computational capabilities, and dynamic wireless conditions modeled by the dynamic behavior and mobility of the users. The framework is complemented with a prediction mechanism that facilitates the orchestration of the physical resources. The efficiency of the proposed scheme is evaluated via modeling and simulation and is shown to outperform a well-known task offloading solution, as well as a more recent one.}, } @article {pmid35062563, year = {2022}, author = {Pardeshi, MS and Sheu, RK and Yuan, SM}, title = {Hash-Chain Fog/Edge: A Mode-Based Hash-Chain for Secured Mutual Authentication Protocol Using Zero-Knowledge Proofs in Fog/Edge.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {2}, pages = {}, pmid = {35062563}, issn = {1424-8220}, abstract = {Authentication is essential for the prevention of various types of attacks in fog/edge computing. Therefore, a novel mode-based hash chain for secure mutual authentication is necessary to address the Internet of Things (IoT) devices' vulnerability, as there have been several years of growing concerns regarding their security. Accordingly, a novel model is designed that is stronger and more effective against any kind of unauthorized attack, as IoT device vulnerability is on the rise due to the mass production of IoT devices (embedded processors, cameras, sensors, etc.) that ignore basic security requirements (passwords, secure communication), making them vulnerable and easily accessible. Furthermore, crackable passwords indicate that the security measures taken are insufficient. Recent studies identify several applications with such requirements, including defense against IoT distributed denial-of-service (IDDoS) attacks, micro-clouds, secure universities, secure Industry 4.0, secure government, secure countries, etc. The problem statement is formulated as the "design and implementation of dynamically interconnecting fog servers and edge devices using the mode-based hash chain for secure mutual authentication protocol", which is stated to be an NP-complete problem. The hash-chain fog/edge implementation using timestamps, mode-based hash chaining, the zero-knowledge proof property, a distributed database/blockchain, and cryptography techniques can be utilized to establish the connection of smart devices in large numbers securely. The hash-chain fog/edge uses blockchain for identity management only, which is used to store the public keys in distributed ledger form, and all these keys are immutable. In addition, it has no overhead and is highly secure as it performs fewer calculations and requires minimum infrastructure. Therefore, we designed the hash-chain fog/edge (HCFE) protocol, which provides a novel mutual authentication scheme for effective session key agreement (using ZKP properties) with secure protocol communications.
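For readers unfamiliar with the hash-chain primitive underlying the HCFE protocol above, the toy sketch below shows plain timestamped hash chaining and verification with `hashlib`. It is an illustration only; the authors' scheme additionally involves mode-based chaining, zero-knowledge proofs, and a distributed ledger, none of which are modeled here.

```python
# Toy hash chain: each link commits to the previous digest, a payload, and a
# timestamp, so tampering with any element breaks verification downstream.
# Illustration only -- not the HCFE protocol itself.
import hashlib
import time

def next_link(prev_digest, payload, ts):
    return hashlib.sha256(prev_digest + payload + str(ts).encode()).digest()

def build_chain(seed, payloads):
    chain, digest = [], seed
    for p in payloads:
        ts = time.time()
        digest = next_link(digest, p, ts)
        chain.append((digest, ts))
    return chain

def verify_chain(seed, payloads, chain):
    digest = seed
    for p, (expected, ts) in zip(payloads, chain):
        digest = next_link(digest, p, ts)
        if digest != expected:
            return False
    return True

msgs = [b"auth-request", b"session-key-share", b"ack"]
chain = build_chain(b"shared-seed", msgs)
assert verify_chain(b"shared-seed", msgs, chain)
```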
The experimental results showed that the hash-chain fog/edge protocol is more efficient at interconnecting various devices and compares favorably in benchmark tests.}, } @article {pmid35062426, year = {2022}, author = {Krivic, P and Kusek, M and Cavrak, I and Skocir, P}, title = {Dynamic Scheduling of Contextually Categorised Internet of Things Services in Fog Computing Environment.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {2}, pages = {}, pmid = {35062426}, issn = {1424-8220}, mesh = {Algorithms ; Cloud Computing ; *Internet of Things ; Reproducibility of Results ; }, abstract = {Fog computing emerged as a concept that responds to the requirements of upcoming solutions requiring optimizations primarily in the context of the following QoS parameters: latency, throughput, reliability, security, and network traffic reduction. The rapid development of local computing devices and container-based virtualization enabled the application of fog computing within the IoT environment. However, it is necessary to utilize algorithm-based service scheduling that considers the targeted QoS parameters to optimize the service performance and reach the potential of the fog computing concept. In this paper, we first describe our categorization of IoT services that affects the execution of our scheduling algorithm. Secondly, we propose our scheduling algorithm that considers the context of processing devices, user context, and service context to determine the optimal schedule for the execution of service components across the distributed fog-to-cloud environment. The conducted simulations confirmed the performance of the proposed algorithm and showcased its major contribution-dynamic scheduling, i.e., the responsiveness to the volatile QoS parameters due to changeable network conditions. Thus, we successfully demonstrated that our dynamic scheduling algorithm enhances the efficiency of service performance based on the targeted QoS criteria of the specific service scenario.}, } @article {pmid35062410, year = {2022}, author = {Abreha, HG and Hayajneh, M and Serhani, MA}, title = {Federated Learning in Edge Computing: A Systematic Survey.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {2}, pages = {}, pmid = {35062410}, issn = {1424-8220}, support = {31R227//Zayed Center for Health Sciences/ ; }, mesh = {*Cloud Computing ; Forecasting ; Humans ; *Privacy ; }, abstract = {Edge Computing (EC) is a new architecture that extends Cloud Computing (CC) services closer to data sources. EC combined with Deep Learning (DL) is a promising technology and is widely used in several applications. However, in conventional DL architectures with EC enabled, data producers must frequently send and share data with third parties, edge or cloud servers, to train their models. This architecture is often impractical due to the high bandwidth requirements, legal constraints, and privacy vulnerabilities. The Federated Learning (FL) concept has recently emerged as a promising solution for mitigating the problems of unwanted bandwidth loss, data privacy, and legal compliance. FL can co-train models across distributed clients, such as mobile phones, automobiles, hospitals, and more, through a centralized server, while maintaining data localization. FL can therefore be viewed as a stimulating factor in the EC paradigm as it enables collaborative learning and model optimization.
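The co-training pattern described in the survey entry above is federated averaging in its simplest form: clients fit locally and only model weights travel to the server. The NumPy sketch below runs a generic FedAvg round on synthetic least-squares data; it is a schematic, not code from any surveyed system.

```python
# Minimal federated-averaging (FedAvg) rounds in NumPy: clients train on
# private data, the server averages weights by dataset size, and raw data
# never leaves the device. Synthetic data, generic sketch.
import numpy as np

rng = np.random.default_rng(0)

def local_sgd(w, X, y, lr=0.1, epochs=5):
    """A few epochs of full-batch least-squares gradient descent locally."""
    for _ in range(epochs):
        grad = 2 * X.T @ (X @ w - y) / len(y)
        w = w - lr * grad
    return w

# Three clients with private datasets of different sizes.
true_w = np.array([1.0, -2.0, 0.5])
clients = []
for n in (50, 80, 120):
    X = rng.normal(size=(n, 3))
    y = X @ true_w + rng.normal(0, 0.1, n)
    clients.append((X, y))

w_global = np.zeros(3)
for _ in range(20):
    local_ws, sizes = [], []
    for X, y in clients:
        local_ws.append(local_sgd(w_global.copy(), X, y))
        sizes.append(len(y))
    # Server step: size-weighted average of client models (the FedAvg step).
    w_global = np.average(local_ws, axis=0, weights=sizes)

print(w_global)  # converges toward true_w without pooling any raw data
```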
Although the existing surveys have taken into account applications of FL in EC environments, there has not been any systematic survey discussing FL implementation and challenges in the EC paradigm. This paper aims to provide a systematic survey of the literature on the implementation of FL in EC environments with a taxonomy to identify advanced solutions and other open problems. In this survey, we review the fundamentals of EC and FL, then we review the existing related works in FL in EC. Furthermore, we describe the protocols, architecture, framework, and hardware requirements for FL implementation in the EC environment. Moreover, we discuss the applications, challenges, and related existing solutions in edge FL. Finally, we detail two relevant case studies of applying FL in EC, and we identify open issues and potential directions for future research. We believe this survey will help researchers better understand the connection between FL and EC enabling technologies and concepts.}, } @article {pmid35062202, year = {2022}, author = {Crisan-Vida, M and Golea, I and Bogdan, R and Stoicu-Tivadar, L}, title = {Application Using Standard Communication Between Medical Facilities.}, journal = {Studies in health technology and informatics}, volume = {289}, number = {}, pages = {498-499}, doi = {10.3233/SHTI210969}, pmid = {35062202}, issn = {1879-8365}, mesh = {Communication ; Delivery of Health Care ; *Electronic Health Records ; *Health Level Seven ; Humans ; Software ; }, abstract = {The web-based application described in this paper helps patients receive treatment more quickly and physicians generate prescriptions more easily. Patients have real-time information on whether their treatment/prescription is available at the pharmacy. With a cloud solution, all information is always available without delays; the only requirement is Internet connectivity. Using standardized communication such as HL7 FHIR, the exchanged information is more easily understood by different medical units; in the future, other medical units can access the patient's treatment/prescription and medical history, so the patient will receive better quality treatment and health care.}, } @article {pmid35062191, year = {2022}, author = {Gallos, P and Menychtas, A and Panagopoulos, C and Bimpas, M and Maglogiannis, I}, title = {Quantifying Citizens' Well-Being in Areas with Natural Based Solutions Using Mobile Computing.}, journal = {Studies in health technology and informatics}, volume = {289}, number = {}, pages = {465-468}, doi = {10.3233/SHTI210958}, pmid = {35062191}, issn = {1879-8365}, mesh = {Cities ; Healthy Lifestyle ; Hot Temperature ; *Mobile Applications ; *Telemedicine ; }, abstract = {Urban planners, architects and civil engineers are integrating Nature-Based Solutions (NBS) to address contemporary environmental, social, health and economic challenges. Many studies claim that NBS are poised to improve citizens' well-being in urban areas. NBS can also benefit Public Health, as they can contribute to optimising environmental parameters (such as urban heat island effects, floods, etc.), as well as to the reduction of diseases, such as cardiovascular ones, and the overall mortality rate. In addition, mobile health (mHealth) solutions have been broadly applied to support citizens' well-being, as they can offer monitoring of their physical and physiological status and promote a healthier lifestyle.
The aim of this paper is to present the specifications, the design and the development of a mobile app for monitoring citizens' well-being in areas where NBS have been applied. The users' physical activity and vital signs are recorded by wearable devices and the users' locations are recorded by the proposed mobile application. All collected data are transferred to the cloud platform where data management mechanisms aggregate data from different sources for combined analysis. The mobile application is currently available for Android and iOS devices and it is compatible with most smart devices and wearables. The "euPOLIS by BioAssist" application can be used as a health and other data collection tool to investigate citizen's well-being improvement in areas with NBS.}, } @article {pmid35061658, year = {2022}, author = {Schnase, JL and Carroll, ML}, title = {Automatic variable selection in ecological niche modeling: A case study using Cassin's Sparrow (Peucaea cassinii).}, journal = {PloS one}, volume = {17}, number = {1}, pages = {e0257502}, pmid = {35061658}, issn = {1932-6203}, mesh = {Animals ; *Ecosystem ; *Algorithms ; Sparrows/physiology ; Monte Carlo Method ; Climate ; Software ; Models, Biological ; }, abstract = {MERRA/Max provides a feature selection approach to dimensionality reduction that enables direct use of global climate model outputs in ecological niche modeling. The system accomplishes this reduction through a Monte Carlo optimization in which many independent MaxEnt runs, operating on a species occurrence file and a small set of randomly selected variables in a large collection of variables, converge on an estimate of the top contributing predictors in the larger collection. These top predictors can be viewed as potential candidates in the variable selection step of the ecological niche modeling process. MERRA/Max's Monte Carlo algorithm operates on files stored in the underlying filesystem, making it scalable to large data sets. Its software components can run as parallel processes in a high-performance cloud computing environment to yield near real-time performance. In tests using Cassin's Sparrow (Peucaea cassinii) as the target species, MERRA/Max selected a set of predictors from Worldclim's Bioclim collection of 19 environmental variables that have been shown to be important determinants of the species' bioclimatic niche. It also selected biologically and ecologically plausible predictors from a more diverse set of 86 environmental variables derived from NASA's Modern-Era Retrospective Analysis for Research and Applications Version 2 (MERRA-2) reanalysis, an output product of the Goddard Earth Observing System Version 5 (GEOS-5) modeling system. 
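The Monte Carlo variable-selection loop that MERRA/Max uses (described in the entry above) is easy to schematize: run many models on small random subsets of a large predictor pool and accumulate per-variable contribution scores. In the sketch below, logistic regression stands in for MaxEnt purely for illustration, and the data are synthetic.

```python
# Schematic Monte Carlo variable selection: repeated fits on random predictor
# subsets, with contributions accumulated across runs. Logistic regression is
# a stand-in for MaxEnt here; data and the "true" drivers are synthetic.
import numpy as np
from sklearn.linear_model import LogisticRegression

rng = np.random.default_rng(42)
n_samples, n_vars, subset_size, n_runs = 500, 86, 10, 300

X = rng.normal(size=(n_samples, n_vars))
# Hypothetical truth: occurrence driven by variables 0, 5 and 17.
logits = 1.5 * X[:, 0] - 2.0 * X[:, 5] + 1.0 * X[:, 17]
y = (logits + rng.logistic(size=n_samples) > 0).astype(int)

scores = np.zeros(n_vars)
counts = np.zeros(n_vars)
for _ in range(n_runs):
    subset = rng.choice(n_vars, size=subset_size, replace=False)
    model = LogisticRegression(max_iter=1000).fit(X[:, subset], y)
    scores[subset] += np.abs(model.coef_[0])  # proxy for "contribution"
    counts[subset] += 1

mean_contrib = scores / np.maximum(counts, 1)
print(np.argsort(mean_contrib)[::-1][:5])  # influential variables dominate
```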
We believe these results point to a technological approach that could expand the use of global climate model outputs in ecological niche modeling, foster exploratory experimentation with otherwise difficult-to-use climate data sets, streamline the modeling process, and, eventually, enable automated bioclimatic modeling as a practical, readily accessible, low-cost, commercial cloud service.}, } @article {pmid35060754, year = {2022}, author = {Krampis, K}, title = {Democratizing bioinformatics through easily accessible software platforms for non-experts in the field.}, journal = {BioTechniques}, volume = {72}, number = {2}, pages = {36-38}, pmid = {35060754}, issn = {1940-9818}, support = {U54 CA221704/CA/NCI NIH HHS/United States ; }, mesh = {*Computational Biology ; Genomics ; *Software ; }, } @article {pmid35058976, year = {2022}, author = {Zhang, J and Li, T and Jiang, Q and Ma, J}, title = {Enabling efficient traceable and revocable time-based data sharing in smart city.}, journal = {EURASIP journal on wireless communications and networking}, volume = {2022}, number = {1}, pages = {3}, pmid = {35058976}, issn = {1687-1472}, abstract = {With the assistance of emerging techniques, such as cloud computing, fog computing and Internet of Things (IoT), the smart city is developing rapidly into a novel and well-accepted service pattern these days. The trend also facilitates numerous relevant applications, e.g., smart health care, smart office, smart campus, etc., and drives the urgent demand for data sharing. However, this brings many concerns about data security, as there is more private and sensitive information contained in the data of smart city applications. It may incur disastrous consequences if the shared data are illegally accessed, which necessitates an efficient data access control scheme for data sharing in smart city applications with resource-poor user terminals. To this end, we propose an efficient traceable and revocable time-based CP-ABE (TR-TABE) scheme which can achieve time-based and fine-grained data access control over a large attribute universe for data sharing in large-scale smart city applications. To trace and punish the malicious users that intentionally leak their keys to pursue illicit profits, we design an efficient user tracing and revocation mechanism with forward and backward security. For efficiency improvement, we integrate outsourced decryption and verify the correctness of its result.
The proposed scheme is proved secure with formal security proof and is demonstrated to be practical for data sharing in smart city applications with extensive performance evaluation.}, } @article {pmid35052084, year = {2021}, author = {Balicki, J}, title = {Many-Objective Quantum-Inspired Particle Swarm Optimization Algorithm for Placement of Virtual Machines in Smart Computing Cloud.}, journal = {Entropy (Basel, Switzerland)}, volume = {24}, number = {1}, pages = {}, pmid = {35052084}, issn = {1099-4300}, support = {0b241de88b60bf04//Multidisciplinary Digital Publishing Institute (Switzerland)/ ; 2555c92a616617fe//Multidisciplinary Digital Publishing Institute (Switzerland)/ ; 468b476fc32fb800//Multidisciplinary Digital Publishing Institute (Switzerland)/ ; 7266125be481efdf//Multidisciplinary Digital Publishing Institute (Switzerland)/ ; bdf736e2e2ff48a7//Multidisciplinary Digital Publishing Institute (Switzerland)/ ; ad3a4560dcf5b889//Multidisciplinary Digital Publishing Institute (Switzerland)/ ; bbc3447ea3f36d12//Multidisciplinary Digital Publishing Institute (Switzerland)/ ; 3ff49a7a7655c658//Multidisciplinary Digital Publishing Institute (Switzerland)/ ; c2cd01ef141cd4c1//Multidisciplinary Digital Publishing Institute (Switzerland)/ ; d29efa1a53c86b9f//Multidisciplinary Digital Publishing Institute (Switzerland)/ ; 7fc62d47fb31935b//Multidisciplinary Digital Publishing Institute (Switzerland)/ ; a59d04f6891c30b7//Multidisciplinary Digital Publishing Institute (Switzerland)/ ; fab425d0ee521689//Multidisciplinary Digital Publishing Institute (Switzerland)/ ; eece9460d518b025//Multidisciplinary Digital Publishing Institute (Switzerland)/ ; cedad61ffdeda3d3//Multidisciplinary Digital Publishing Institute (Switzerland)/ ; 6706376c39a87747//Multidisciplinary Digital Publishing Institute (Switzerland)/ ; 571a18daaa84fb1e//Multidisciplinary Digital Publishing Institute (Switzerland)/ ; 967271458f9e94eb//Multidisciplinary Digital Publishing Institute (Switzerland)/ ; 30601201c3d85c5b//Multidisciplinary Digital Publishing Institute (Switzerland)/ ; }, abstract = {Particle swarm optimization algorithm (PSO) is an effective metaheuristic that can determine Pareto-optimal solutions. We propose an extended PSO by introducing quantum gates in order to ensure the diversity of particle populations that are looking for efficient alternatives. The quality of solutions was verified in the issue of assignment of resources in the computing cloud to improve the live migration of virtual machines. We consider the multi-criteria optimization problem of deep learning-based models embedded into virtual machines. Computing clouds with deep learning agents can support several areas of education, smart city or economy. Because deep learning agents require lots of computer resources, seven criteria are studied such as electric power of hosts, reliability of cloud, CPU workload of the bottleneck host, communication capacity of the critical node, a free RAM capacity of the most loaded memory, a free disc memory capacity of the most busy storage, and overall computer costs. Quantum gates modify an accepted position for the current location of a particle. To verify the above concept, various simulations have been carried out on the laboratory cloud based on the OpenStack platform. 
Numerical experiments have confirmed that the multi-objective quantum-inspired particle swarm optimization algorithm provides better solutions than the other metaheuristics.}, } @article {pmid35047635, year = {2022}, author = {Kasinathan, G and Jayakumar, S}, title = {Cloud-Based Lung Tumor Detection and Stage Classification Using Deep Learning Techniques.}, journal = {BioMed research international}, volume = {2022}, number = {}, pages = {4185835}, pmid = {35047635}, issn = {2314-6141}, mesh = {*Cloud Computing ; *Databases, Factual ; *Deep Learning ; Humans ; Lung Neoplasms/*diagnostic imaging ; Neoplasm Staging ; *Positron Emission Tomography Computed Tomography ; *Radiographic Image Interpretation, Computer-Assisted ; }, abstract = {Artificial intelligence (AI), the Internet of Things (IoT), and cloud computing have recently become widely used in the healthcare sector, aiding radiologists in better decision-making. PET imaging, or positron emission tomography, is one of the most reliable approaches for a radiologist to diagnose many cancers, including lung tumors. In this work, we propose stage classification of lung tumors, a more challenging task in computer-aided diagnosis. A modified computer-aided diagnosis is thus being considered as a way to reduce radiologists' heavy workloads and to provide a second opinion. In this paper, we present a strategy for classifying and validating different stages of lung tumor progression, as well as a deep neural model and data collection using a cloud system for categorizing phases of pulmonary illness. The proposed system presents a Cloud-based Lung Tumor Detector and Stage Classifier (Cloud-LTDSC) as a hybrid technique for PET/CT images. The proposed Cloud-LTDSC initially developed the active contour model for lung tumor segmentation, and a multilayer convolutional neural network (M-CNN) for classifying different stages of lung cancer has been modelled and validated with standard benchmark images. The performance of the presented technique is evaluated using the benchmark LIDC-IDRI dataset of 50 low-dose images, as well as lung CT DICOM images. Compared with existing techniques in the literature, our proposed method achieved good results for the evaluated performance metrics: accuracy, recall, and precision. Across numerous aspects, our proposed approach produces superior outcomes on all of the applied dataset images. Furthermore, the experiments achieve lung tumor stage classification accuracies of 97%-99.1%, averaging 98.6%, which is significantly higher than other existing techniques.}, } @article {pmid35047153, year = {2022}, author = {Syed, SA and Sheela Sobana Rani, K and Mohammad, GB and Anil Kumar, G and Chennam, KK and Jaikumar, R and Natarajan, Y and Srihari, K and Barakkath Nisha, U and Sundramurthy, VP}, title = {Design of Resources Allocation in 6G Cybertwin Technology Using the Fuzzy Neuro Model in Healthcare Systems.}, journal = {Journal of healthcare engineering}, volume = {2022}, number = {}, pages = {5691203}, pmid = {35047153}, issn = {2040-2309}, mesh = {*Cloud Computing ; Computer Simulation ; *Delivery of Health Care ; Humans ; Resource Allocation ; Technology ; }, abstract = {In 6G edge communication networks, machine learning models play a major role in enabling intelligent decision-making for optimal resource allocation in healthcare systems.
However, this creates bottlenecks in the form of sophisticated memory calculations across the hidden layers, and communication costs between the edge devices/edge nodes and the cloud centres when transmitting data from the healthcare management system to the cloud centre via edge nodes. In order to reduce these hurdles, it is important to share workloads so as to eliminate the problems related to complicated memory calculations and transmission costs. The effort aims mainly to reduce the storage and cloud-computing costs associated with neural networks, as the complexity of the computations increases with increasing numbers of hidden layers. This study modifies federated learning to function with distributed resource-assignment settings as a distributed deep learning model. It improves the capacity to learn from the data and assigns an ideal workload depending on the limited available resources, slow network connections, and larger numbers of edge devices. Current network status can be sent to the cloud centre by the edge devices and edge nodes autonomously using cybertwin, so that local data are regularly updated to compute global data. Simulations show that the proposed resource management and allocation outperforms standard approaches. It is seen from the results that the proposed method achieves higher resource utilization and success rate than existing methods. Index terms: fuzzy, healthcare, bioinformatics, 6G wireless communication, cybertwin, machine learning, neural network, and edge.}, } @article {pmid35047027, year = {2022}, author = {Raju, KB and Dara, S and Vidyarthi, A and Gupta, VM and Khan, B}, title = {Smart Heart Disease Prediction System with IoT and Fog Computing Sectors Enabled by Cascaded Deep Learning Model.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {1070697}, pmid = {35047027}, issn = {1687-5273}, mesh = {Cloud Computing ; *Deep Learning ; *Heart Diseases ; Humans ; *Internet of Things ; Neural Networks, Computer ; }, abstract = {Chronic illnesses like chronic respiratory disease, cancer, heart disease, and diabetes are threats to humans around the world. Among them, heart disease with disparate features or symptoms complicates diagnosis. Because of the emergence of smart wearable gadgets, fog computing and "Internet of Things" (IoT) solutions have become necessary for diagnosis. The proposed model integrates Edge-Fog-Cloud computing for the accurate and fast delivery of outcomes. The hardware components collect data from different patients. Heart features are extracted from signals to obtain significant features. Furthermore, features of other attributes are also extracted. All these features are gathered and fed to the diagnostic system using an Optimized Cascaded Convolution Neural Network (CCNN). Here, the hyperparameters of the CCNN are optimized by Galactic Swarm Optimization (GSO). Through the performance analysis, the precision of the suggested GSO-CCNN is 3.7%, 3.7%, 3.6%, 7.6%, 67.9%, 48.4%, 33%, 10.9%, and 7.6% higher than that of PSO-CCNN, GWO-CCNN, WOA-CCNN, DHOA-CCNN, DNN, RNN, LSTM, CNN, and CCNN, respectively.
Thus, the comparative analysis of the suggested system confirms its efficiency over the conventional models.}, } @article {pmid35043169, year = {2022}, author = {Xie, M and Yang, L and Chen, G and Wang, Y and Xie, Z and Wang, H}, title = {RiboChat: a chat-style web interface for analysis and annotation of ribosome profiling data.}, journal = {Briefings in bioinformatics}, volume = {23}, number = {2}, pages = {}, doi = {10.1093/bib/bbab559}, pmid = {35043169}, issn = {1477-4054}, mesh = {Computational Biology/methods ; *Protein Biosynthesis ; RNA, Messenger/metabolism ; *Ribosomes/genetics/metabolism ; Software ; }, abstract = {The increasing volume of ribosome profiling (Ribo-seq) data, the computational complexity of its data processing, and the operational handicap of related analytical procedures present a daunting set of informatics challenges. These impose a substantial barrier to researchers, particularly those with no or limited bioinformatics expertise, in analyzing and decoding translation information from Ribo-seq data, thus driving the need for a new research paradigm for data computation and information extraction. In this knowledge base, we herein present a novel interactive web platform, RiboChat (https://db.cngb.org/ribobench/chat.html), for directly analyzing and annotating Ribo-seq data in the form of a chat conversation. It consists of a user-friendly web interface and a backend cloud-computing service. When a user types a data analysis question into the chat window, the object-text detection module runs to recognize relevant keywords from the input text. Based on the features identified in the input, individual analytics modules are then scored to find the best-matching candidate. The corresponding analytics module is then executed after checking that datasets have been uploaded and parameters configured. Overall, RiboChat represents an important step forward in the emerging direction of next-generation data analytics and will enable the broad research community to conveniently decipher translation information embedded within Ribo-seq data.}, } @article {pmid35037207, year = {2022}, author = {Wang, L and Lu, Z and Van Buren, P and Ware, D}, title = {SciApps: An Automated Platform for Processing and Distribution of Plant Genomics Data.}, journal = {Methods in molecular biology (Clifton, N.J.)}, volume = {2443}, number = {}, pages = {197-209}, pmid = {35037207}, issn = {1940-6029}, mesh = {Computational Biology ; Genome, Plant ; *Genomics/methods ; Information Storage and Retrieval ; *Software ; Workflow ; }, abstract = {SciApps is an open-source, web-based platform for processing, storing, visualizing, and distributing genomic data and analysis results. Built upon the Tapis (formerly Agave) platform, SciApps brings users TB-scale data storage via the CyVerse Data Store and over one million CPUs via the Extreme Science and Engineering Discovery Environment (XSEDE) resources at the Texas Advanced Computing Center (TACC). SciApps provides users ways to chain individual jobs into automated and reproducible workflows in a distributed cloud and provides a management system for data, associated metadata, individual analysis jobs, and multi-step workflows.
This chapter provides examples of how to (1) submit and manage jobs and construct workflows, (2) use public workflows for Bulked Segregant Analysis (BSA), and (3) construct a Data Analysis Center (DAC) and Data Coordination Center (DCC) for the plant ENCODE project.}, } @article {pmid35037200, year = {2022}, author = {Williams, J}, title = {CyVerse for Reproducible Research: RNA-Seq Analysis.}, journal = {Methods in molecular biology (Clifton, N.J.)}, volume = {2443}, number = {}, pages = {57-79}, pmid = {35037200}, issn = {1940-6029}, mesh = {*Big Data ; Cloud Computing ; Data Analysis ; RNA-Seq ; Reproducibility of Results ; *Software ; }, abstract = {Posing complex research questions poses complex reproducibility challenges. Datasets may need to be managed over long periods of time. Reliable and secure repositories are needed for data storage. Sharing big data requires advance planning and becomes complex when collaborators are spread across institutions and countries. Many complex analyses require the larger compute resources only provided by cloud and high-performance computing infrastructure. Finally, at publication, funder and publisher requirements must be met for data availability and accessibility and computational reproducibility. For all of these reasons, cloud-based cyberinfrastructures are an important component for satisfying the needs of data-intensive research. Learning how to incorporate these technologies into your research skill set will allow you to work with data analysis challenges that are often beyond the resources of individual research institutions. One of the advantages of CyVerse is that there are many solutions for high-powered analyses that do not require knowledge of command line (i.e., Linux) computing. In this chapter we will highlight CyVerse capabilities by analyzing RNA-Seq data. The lessons learned will translate to doing RNA-Seq in other computing environments and will focus on how CyVerse infrastructure supports reproducibility goals (e.g., metadata management, containers), team science (e.g., data sharing features), and flexible computing environments (e.g., interactive computing, scaling).}, } @article {pmid35036553, year = {2022}, author = {Ogwel, B and Odhiambo-Otieno, G and Otieno, G and Abila, J and Omore, R}, title = {Leveraging cloud computing for improved health service delivery: Findings from public health facilities in Kisumu County, Western Kenya-2019.}, journal = {Learning health systems}, volume = {6}, number = {1}, pages = {e10276}, pmid = {35036553}, issn = {2379-6146}, abstract = {INTRODUCTION: Healthcare delivery systems across the world have been shown to fall short of the ideals of being cost-effective and meeting pre-established standards of quality, but the problem is more pronounced in Africa. Cloud computing emerges as a platform healthcare institutions could leverage to address these shortfalls. The aim of this study was to establish the extent of cloud computing adoption and its influence on health service delivery by public health facilities in Kisumu County.

METHODS: The study employed a cross-sectional design with one-time data collection among facility in-charges and health records officers from 57 public health facilities. The target population was 114 healthcare personnel, and the sample size (n = 88) was computed using the Yamane formula and drawn using stratified random sampling. Poisson regression was used to determine the influence of cloud computing adoption on the number of realized benefits to health service delivery.
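The Poisson-regression analysis described in METHODS can be reproduced in outline with statsmodels, exponentiating the adoption coefficient to obtain the incidence-rate ratio (IRR). The column names and the toy data below are hypothetical placeholders, not the study's dataset.

```python
# Sketch: Poisson regression of number-of-benefits on cloud adoption;
# IRR = exp(coefficient). Requires `pip install statsmodels pandas`.
import numpy as np
import pandas as pd
import statsmodels.api as sm

# One row per respondent; values here are illustrative only.
df = pd.DataFrame({
    "n_benefits": [5, 2, 7, 1, 6, 3, 8, 2],
    "adopted_cloud": [1, 0, 1, 0, 1, 0, 1, 0],
})
X = sm.add_constant(df[["adopted_cloud"]])
model = sm.GLM(df["n_benefits"], X, family=sm.families.Poisson()).fit()

irr = np.exp(model.params["adopted_cloud"])
ci_low, ci_high = np.exp(model.conf_int().loc["adopted_cloud"])
print(f"IRR = {irr:.2f} (95% CI {ci_low:.2f}-{ci_high:.2f})")
```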

RESULTS: Among 80 respondents, cloud computing had been adopted by 42 (53%), while Software-as-a-Service, Platform-as-a-Service, and Infrastructure-as-a-Service implementations were at 100%, 0%, and 5% among adopters, respectively. Overall, those who had adopted cloud computing realized a significantly higher number of benefits to health service delivery compared to those who had not (incidence-rate ratio (IRR) = 1.93, 95% confidence interval (95% CI) [1.36-2.72]). A significantly higher number of benefits was realized by those who had implemented Infrastructure-as-a-Service alongside Software-as-a-Service (IRR = 2.22, 95% CI [1.15-4.29]) and those who had implemented Software-as-a-Service only (IRR = 1.89, 95% CI [1.33-2.70]) compared to non-adopters. We observed similar results in the stratified analysis looking at economic, operational, and functional benefits to health service delivery.

CONCLUSION: Cloud computing resulted in improved health service delivery, with these benefits still being realized irrespective of the service implementation model deployed. The findings buttress the need for healthcare institutions to adopt cloud computing and integrate it into their operations in order to improve health service delivery.}, } @article {pmid35036538, year = {2021}, author = {Li, Y and Li, T and Shen, P and Hao, L and Liu, W and Wang, S and Song, Y and Bao, L}, title = {Sim-DRS: a similarity-based dynamic resource scheduling algorithm for microservice-based web systems.}, journal = {PeerJ. Computer science}, volume = {7}, number = {}, pages = {e824}, pmid = {35036538}, issn = {2376-5992}, abstract = {Microservice-based Web Systems (MWS), which provide a fundamental infrastructure for constructing large-scale cloud-based Web applications, are designed as a set of independent, small and modular microservices implementing individual tasks and communicating with messages. This microservice-based architecture offers great application scalability, but meanwhile incurs complex and reactive autoscaling actions that are performed dynamically and periodically based on current workloads. However, this problem has thus far remained largely unexplored. In this paper, we formulate a problem of Dynamic Resource Scheduling for Microservice-based Web Systems (DRS-MWS) and propose a similarity-based heuristic scheduling algorithm that aims to quickly find viable scheduling schemes by utilizing solutions to similar problems. The performance superiority of the proposed scheduling solution in comparison with three state-of-the-art algorithms is illustrated by experimental results generated through a well-known microservice benchmark on disparate computing nodes in public clouds.}, } @article {pmid35036334, year = {2022}, author = {Hussain, SA and Bassam, NA and Zayegh, A and Ghawi, SA}, title = {Prediction and evaluation of healthy and unhealthy status of COVID-19 patients using wearable device prototype data.}, journal = {MethodsX}, volume = {9}, number = {}, pages = {101618}, pmid = {35036334}, issn = {2215-0161}, abstract = {The seriousness of the COVID-19 pandemic is making the whole world suffer due to ineffective medication and vaccines. The prediction analysis in this article is carried out with a dataset downloaded from an application programming interface (API) designed explicitly for COVID-19 quarantined patients. The measured data are collected from a wearable device used by quarantined healthy and unhealthy patients. The wearable device provides timely data on temperature, heart rate, SpO2 blood saturation, and blood pressure for alerting the medical authorities and supporting better diagnosis and treatment. The dataset contains 1085 patients with eight features, representing 490 COVID-19-infected and 595 normal cases. The work considers different parameters, namely heart rate, temperature, SpO2, bpm parameters, and health status. Furthermore, the real-time data collected can predict the health status of patients as infected or non-infected from the measured parameters. The dataset is used with a random forest classifier, alongside linear and polynomial regression, to train and validate COVID-19 patient data. Google Colab, an integrated development environment with built-in Python and Jupyter notebook support, was used with scikit-learn version 0.22.1, tested virtually on cloud coding tools. The dataset is split into an 80%/20% ratio for training and testing to evaluate accuracy and avoid overfitting in the model.
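The classification setup just described (a random forest on wearable vitals with an 80/20 split) corresponds to a standard scikit-learn workflow. The sketch below uses synthetic placeholder data and an invented label rule; only the overall pattern mirrors the study.

```python
# Sketch of the study's train/test pattern: random forest on wearable vitals
# with an 80/20 split. Feature names are illustrative; data are synthetic.
import numpy as np
import pandas as pd
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split

rng = np.random.default_rng(1)
n = 1085  # same size as the study's dataset; contents are placeholders
df = pd.DataFrame({
    "heart_rate": rng.normal(85, 12, n),
    "temperature": rng.normal(37.0, 0.6, n),
    "spo2": rng.normal(96, 2.5, n),
    "systolic_bp": rng.normal(120, 15, n),
})
y = (df["spo2"] < 95).astype(int)  # placeholder label, not the study's rule

X_train, X_test, y_train, y_test = train_test_split(
    df, y, test_size=0.2, random_state=0, stratify=y)
clf = RandomForestClassifier(n_estimators=200, random_state=0)
clf.fit(X_train, y_train)
print("test accuracy:", accuracy_score(y_test, clf.predict(X_test)))
```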
This analysis could help medical authorities and governmental agencies of every country respond in a timely manner and reduce the spread of the disease.•The measured data provide a comprehensive mapping of disease symptoms to predict health status, so authorities can restrict virus transmission and take the steps necessary to control, mitigate, and manage the disease.•It benefits scientific research with Artificial Intelligence (AI) in tackling the hurdles of disease diagnosis analysis.•The diagnostic results for disease symptoms can identify patient severity, helping to monitor and manage the difficulties caused by the outbreak.}, } @article {pmid35035864, year = {2022}, author = {He, P and Zhang, B and Shen, S}, title = {Effects of Out-of-Hospital Continuous Nursing on Postoperative Breast Cancer Patients by Medical Big Data.}, journal = {Journal of healthcare engineering}, volume = {2022}, number = {}, pages = {9506915}, pmid = {35035864}, issn = {2040-2309}, mesh = {Big Data ; *Breast Neoplasms/surgery ; Female ; Hospitals ; Humans ; Patient Compliance ; *Quality of Life ; }, abstract = {This study aimed to explore the application value of the intelligent medical communication system based on the Apriori algorithm and a cloud follow-up platform in out-of-hospital continuous nursing of breast cancer patients. In this study, the Apriori algorithm is optimized with Amazon Web Services (AWS) and graphics processing units (GPUs) to improve its data mining speed. At the same time, a cloud follow-up platform-based intelligent mobile medical communication system is established, which includes log-in, my workstation, patient records, follow-up center, satisfaction management, education center, SMS platform, and appointment management modules. The subjects are divided into the control group (routine telephone follow-up, 163) and the intervention group (continuous nursing intervention, 216) according to different nursing methods. The cloud follow-up platform-based intelligent medical communication system is used to analyze patients' compliance, quality of life before and after nursing, functional limitation of the affected limb, and nursing satisfaction under different nursing methods. The running time of the Apriori algorithm is proportional to the data amount and inversely proportional to the number of nodes in the cluster. Compared with the control group, there are statistically significant differences in the proportion of complete compliance data, the proportion of poor compliance data, and the proportion of total compliance in the intervention group (P < 0.05). After the intervention, the quality-of-life scores of the two groups are statistically different from those before treatment (P < 0.05), and the quality-of-life scores in the intervention group are higher than those in the control group (P < 0.05). The proportion of patients with limited and severely limited functional activity of the affected limb in the intervention group is significantly lower than that in the control group (P < 0.05).
The satisfaction rate of postoperative nursing in the intervention group is significantly higher than that in the control group (P < 0.001), and the proportion of basically satisfied and dissatisfied patients in the control group is higher than that in the intervention group (P < 0.05).}, } @article {pmid35035843, year = {2022}, author = {Tang, J}, title = {Discussion on Health Service System of Mobile Medical Institutions Based on Internet of Things and Cloud Computing.}, journal = {Journal of healthcare engineering}, volume = {2022}, number = {}, pages = {5235349}, pmid = {35035843}, issn = {2040-2309}, mesh = {*Cloud Computing ; Delivery of Health Care ; Health Services ; Humans ; Internet ; *Internet of Things ; }, abstract = {Because people pay increasing attention to physical health and the traditional medical service system has many problems, calls for a new medical model are growing louder. At present, there is much research applying modern science and technology to propose solutions for medical development, but it generally focuses on details and neglects the construction of the whole medical service system. In order to solve problems such as the low efficiency of the traditional medical model, difficult doctor-patient communication, and the unreasonable allocation of medical resources, this article proposes establishing a comprehensive medical and health service system. First, correlation functions, such as cosine correlation, are used to calculate the correlation of various medical products, and then the correlation measurement methods of cloud computing and the Internet of Things are used to realize the network connection of smart medical equipment; to efficiently store, calculate, and analyze health data; and to realize online outpatient services, health file management, data analysis, and other functions. Then, the energy consumption formula of the wireless transceiver was used to reduce resource loss in the operation of the system. A questionnaire was then used to understand the current situation of mobile healthcare and to put forward improvement suggestions. This article also scores the performance of the system. The experimental results show that the performance rating of traditional medical institutions is B, while that of mobile medical institutions is A, and efficiency is improved by 4.42%.}, } @article {pmid35035817, year = {2022}, author = {Li, W and Zhang, Y and Wang, J and Li, Q and Zhao, D and Tang, B and Wang, S and Shao, H}, title = {MicroRNA-489 Promotes the Apoptosis of Cardiac Muscle Cells in Myocardial Ischemia-Reperfusion Based on Smart Healthcare.}, journal = {Journal of healthcare engineering}, volume = {2022}, number = {}, pages = {2538769}, pmid = {35035817}, issn = {2040-2309}, mesh = {Animals ; Apoptosis/physiology ; Humans ; *MicroRNAs/genetics/metabolism ; *Myocardial Infarction ; *Myocardial Ischemia ; *Myocardial Reperfusion Injury/genetics/metabolism ; Myocytes, Cardiac/metabolism ; Phosphatidylinositol 3-Kinases/metabolism ; Proto-Oncogene Proteins c-akt/metabolism ; Reperfusion ; Signal Transduction ; }, abstract = {With the development of information technology, the concept of smart healthcare has gradually come to the fore.
Smart healthcare uses a new generation of information technologies, such as the Internet of Things (IoT), big data, cloud computing, and artificial intelligence, to transform the traditional medical system in an all-around way, making healthcare more efficient, more convenient, and more personalized. miRNAs can regulate the proliferation, differentiation, and apoptosis of human cells. Relevant studies have also shown that miRNAs may play a key role in the occurrence and development of myocardial ischemia-reperfusion injury (MIRI). This study aims to explore the effects of miR-489 in MIRI. In this study, miR-489 expression in a myocardial ischemia-reperfusion animal model and in H9C2 cells induced by H/R was detected by qRT-PCR. The release of lactate dehydrogenase (LDH) and the activity of creatine kinase (CK) were detected after miR-489 knockdown in H9C2 cells induced by H/R. The apoptosis of H9C2 cells and animal models was determined by ELISA. The relationship between miR-489 and SPIN1 was verified by a double fluorescence reporter enzyme assay. The expression of the PI3K/AKT pathway-related proteins was detected by Western blot. Experimental results showed that miR-489 was highly expressed in cardiac muscle cells of the animal model and in H9C2 cells induced by H/R of the myocardial infarction group, which was positively associated with the apoptosis of cardiac muscle cells with ischemia-reperfusion. miR-489 knockdown can reduce the apoptosis of cardiac muscle cells caused by ischemia-reperfusion. In downstream targeting studies, it was found that miR-489 promotes the apoptosis of cardiac muscle cells after ischemia-reperfusion by targeting the inhibition of the SPIN1-mediated PI3K/AKT pathway. In conclusion, high expression of miR-489 is associated with increased apoptosis of cardiac muscle cells after ischemia-reperfusion, and it can promote apoptosis after ischemia-reperfusion by targeting the inhibition of the SPIN1-mediated PI3K/AKT pathway. Therefore, miR-489 can be one of the potential therapeutic targets for reducing the apoptosis of cardiac muscle cells after ischemia-reperfusion.}, } @article {pmid35033986, year = {2022}, author = {Jadhao, S and Davison, CL and Roulis, EV and Schoeman, EM and Divate, M and Haring, M and Williams, C and Shankar, AJ and Lee, S and Pecheniuk, NM and Irving, DO and Hyland, CA and Flower, RL and Nagaraj, SH}, title = {RBCeq: A robust and scalable algorithm for accurate genetic blood typing.}, journal = {EBioMedicine}, volume = {76}, number = {}, pages = {103759}, pmid = {35033986}, issn = {2352-3964}, mesh = {Algorithms ; Australia ; *Blood Group Antigens/genetics ; *Blood Grouping and Crossmatching ; Genotype ; Humans ; Reproducibility of Results ; }, abstract = {BACKGROUND: While blood transfusion is an essential cornerstone of hematological care, patients requiring repetitive transfusion remain at persistent risk of alloimmunization due to the diversity of human blood group polymorphisms. Despite the promise, user-friendly methods to accurately identify blood types from next-generation sequencing data are currently lacking. To address this unmet need, we have developed RBCeq, a novel genetic blood typing algorithm to accurately identify 36 blood group systems.

METHODS: RBCeq can predict complex blood groups such as RH, and ABO that require identification of small indels and copy number variants. RBCeq also reports clinically significant, rare, and novel variants with potential clinical relevance that may lead to the identification of novel blood group alleles.

FINDINGS: The RBCeq algorithm demonstrated 99.07% concordance when validated on 402 samples, comprising 29 antigens validated by serology and 9 antigens validated by SNP array across 14 blood group systems, plus 59 antigens validated against manually predicted phenotypes from variant call files. We have also developed a user-friendly web server that generates detailed blood typing reports with advanced visualization (https://www.rbceq.org/).
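Concordance as reported here is simply the fraction of antigen calls that match the reference method. A toy computation with hypothetical column names and made-up calls:

```python
# Percent concordance between predicted and reference antigen calls.
# Columns and values are illustrative, not RBCeq output.
import pandas as pd

calls = pd.DataFrame({
    "predicted": ["K+", "K-", "Fya+", "Jkb-", "M+"],
    "serology":  ["K+", "K-", "Fya+", "Jkb+", "M+"],
})
concordance = (calls["predicted"] == calls["serology"]).mean() * 100
print(f"{concordance:.2f}% concordant")  # 80.00% for this toy table
```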

INTERPRETATION: RBCeq will assist blood banks and immunohematology laboratories by overcoming existing methodological limitations in scalability, reproducibility, and accuracy when genotyping and phenotyping in multi-ethnic populations. This Amazon Web Services (AWS) cloud-based platform has the potential to reduce pre-transfusion testing time and to increase sample processing throughput, ultimately improving the quality of patient care.

FUNDING: This work was supported in part by an Advance Queensland Research Fellowship, MRFF Genomics Health Futures Mission (76,757), and the Australian Red Cross Lifeblood. The Australian governments fund the Australian Red Cross Lifeblood for the provision of blood, blood products and services to the Australian community.}, } @article {pmid35031650, year = {2022}, author = {Li, J and Wang, J and Yang, L and Ye, H}, title = {Spatiotemporal change analysis of long time series inland water in Sri Lanka based on remote sensing cloud computing.}, journal = {Scientific reports}, volume = {12}, number = {1}, pages = {766}, pmid = {35031650}, issn = {2045-2322}, support = {No. 2021KRM079//Innovation Capability Support Program of Shaanxi/ ; No. 2021WHZ0090//Technology Innovation Center for Land Engineering and Human Settlements, Shaanxi Land Engineering Construction Group Co.,Ltd and Xi' an Jiaotong University/ ; No. XDA2003030201//Strategic Priority Research Program of Chinese Academy of Sciences/ ; No. 41771388//National Natural Science Foundation of China/ ; }, abstract = {Sri Lanka is an important hub connecting Asia-Africa-Europe maritime routes. It receives abundant but spatiotemporally uneven rainfall and has evident seasonal water shortages. Monitoring water area changes in inland lakes and reservoirs plays an important role in guiding the development and utilisation of water resources. In this study, a rapid surface water extraction model based on the Google Earth Engine remote sensing cloud computing platform was constructed. By evaluating the optimal spectral water index method, the spatiotemporal variations of reservoirs and inland lakes in Sri Lanka were analysed. The results showed that the Automated Water Extraction Index (AWEIsh) could accurately identify the water boundary with an overall accuracy of 99.14%, which was suitable for surface water extraction in Sri Lanka. The area of the Maduru Oya Reservoir showed an overall increasing trend with small fluctuations from 1988 to 2018, and the monthly area of the reservoir fluctuated significantly in 2017. Thus, water resource management in the dry zone should focus more on seasonal regulation and control. From 1995 to 2015, the number and area of lakes and reservoirs in Sri Lanka increased to different degrees, mainly concentrated in arid provinces including the Northern, North Central, and Western Provinces. Overall, the amount of surface water resources has increased.}, } @article {pmid35028522, year = {2021}, author = {Li, Q and Jiang, L and Qiao, K and Hu, Y and Chen, B and Zhang, X and Ding, Y and Yang, Z and Li, C}, title = {INCloud: integrated neuroimaging cloud for data collection, management, analysis and clinical translations.}, journal = {General psychiatry}, volume = {34}, number = {6}, pages = {e100651}, pmid = {35028522}, issn = {2517-729X}, abstract = {BACKGROUND: Neuroimaging techniques provide rich and accurate measures of brain structure and function, and have become one of the most popular methods in mental health and neuroscience research. Rapidly growing neuroimaging research generates massive amounts of data, bringing new challenges in data collection, large-scale data management, efficient computing, and data mining and analysis.

AIMS: To tackle the challenges and promote the application of neuroimaging technology in clinical practice, we developed an integrated neuroimaging cloud (INCloud). INCloud provides a full-stack solution for the entire process of large-scale neuroimaging data collection, management, analysis and clinical applications.

METHODS: INCloud consists of data acquisition systems, a data warehouse, automatic multimodal image quality check and processing systems, a brain feature library, a high-performance computing cluster and computer-aided diagnosis systems (CADS) for mental disorders. A unique design of INCloud is the brain feature library that converts the unit of data management from image to image features such as hippocampal volume. Connecting the CADS to the scientific database, INCloud allows the accumulation of scientific data to continuously improve the accuracy of objective diagnosis of mental disorders.

RESULTS: Users can manage and analyze neuroimaging data on INCloud, without the need to download them to the local device. INCloud users can query, manage, analyze and share image features based on customized criteria. Several examples of 'mega-analyses' based on the brain feature library are shown.

CONCLUSIONS: Compared with traditional neuroimaging acquisition and analysis workflow, INCloud features safe and convenient data management and sharing, reduced technical requirements for researchers, high-efficiency computing and data mining, and straightforward translations to clinical service. The design and implementation of the system are also applicable to imaging research platforms in other fields.}, } @article {pmid35027906, year = {2021}, author = {Han, H and Gu, X}, title = {Linkage Between Inclusive Digital Finance and High-Tech Enterprise Innovation Performance: Role of Debt and Equity Financing.}, journal = {Frontiers in psychology}, volume = {12}, number = {}, pages = {814408}, pmid = {35027906}, issn = {1664-1078}, abstract = {This study investigates the relationship between digital financial inclusion, external financing, and the innovation performance of high-tech enterprises in China. The choice of corporate financing methods is an important part of organizational behavioral psychology, and different financing models will have a certain effect on organizational performance, especially in the digital economy environment. Therefore, based on resource dependence theory and financing constraint theory, the present study utilizes the panel data collected from the China Stock Market & Accounting Research (CSMAR) database from 2011 to 2020 of 112 companies in the Yangtze River Delta region and the "The Peking University Digital Financial Inclusion Index of China (PKU-DFIIC)" released by the Peking University Digital Finance Research Center and Ant Financial Group. The results show that the Digital Financial Inclusion Index (DFIIC) has a significant positive correlation with the innovation performance of high-tech enterprises. The higher the level of debt financing, the stronger the role of digital financial inclusion in promoting innovation performance. Investigating the DFIIC in terms of coverage breadth and usage depth, we find that usage depth does not significantly encourage innovation performance. The effect of the interaction between coverage breadth and external financing is consistent with the results for the DFIIC. The study suggests that equity financing promotes the usage depth of the DFIIC in state-owned enterprises. In contrast, debt financing promotes the coverage breadth of non-state-owned enterprises. Finally, we propose relevant policy recommendations based on the research results. It includes in-depth popularization of inclusive finance in the daily operations of enterprises at the technical level, refinement of external financing policy incentives for enterprises based on the characteristics of ownership, and strengthening the research of technologies such as big data, artificial intelligence (AI), and cloud computing. 
The paper presents a range of theoretical and practical implications for practitioners and academics relevant to high-tech enterprises.}, } @article {pmid35022699, year = {2022}, author = {Decap, D and de Schaetzen van Brienen, L and Larmuseau, M and Costanza, P and Herzeel, C and Wuyts, R and Marchal, K and Fostier, J}, title = {Halvade somatic: Somatic variant calling with Apache Spark.}, journal = {GigaScience}, volume = {11}, number = {1}, pages = {}, pmid = {35022699}, issn = {2047-217X}, mesh = {*High-Throughput Nucleotide Sequencing/methods ; Polymorphism, Single Nucleotide ; Sequence Analysis, DNA/methods ; *Software ; Exome Sequencing ; Whole Genome Sequencing ; }, abstract = {BACKGROUND: The accurate detection of somatic variants from sequencing data is of key importance for cancer treatment and research. Somatic variant calling requires a high sequencing depth of the tumor sample, especially when the detection of low-frequency variants is also desired. In turn, this leads to large volumes of raw sequencing data to process and hence, large computational requirements. For example, calling the somatic variants according to the GATK best practices guidelines requires days of computing time for a typical whole-genome sequencing sample.

FINDINGS: We introduce Halvade Somatic, a framework for somatic variant calling from DNA sequencing data that takes advantage of multi-node and/or multi-core compute platforms to reduce runtime. It relies on Apache Spark to provide scalable I/O and to create and manage data streams that are processed on different CPU cores in parallel. Halvade Somatic contains all required steps to process the tumor and matched normal sample according to the GATK best practices recommendations: read alignment (BWA), sorting of reads, preprocessing steps such as marking duplicate reads and base quality score recalibration (GATK), and, finally, calling the somatic variants (Mutect2). Our approach reduces the runtime on a single 36-core node to 19.5 h compared to a runtime of 84.5 h for the original pipeline, a speedup of 4.3 times. Runtime can be further decreased by scaling to multiple nodes, e.g., we observe a runtime of 1.36 h using 16 nodes, an additional speedup of 14.4 times. Halvade Somatic supports variant calling from both whole-genome sequencing and whole-exome sequencing data and also supports Strelka2 as an alternative or complementary variant calling tool. We provide a Docker image to facilitate single-node deployment. Halvade Somatic can be executed on a variety of compute platforms, including Amazon EC2 and Google Cloud.
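As a hedged sketch of the data-parallel pattern described above (not Halvade Somatic's actual code), the snippet below uses PySpark to fan independent genomic regions out across cores; the region list and the call_region() worker are illustrative placeholders.

    from pyspark.sql import SparkSession

    spark = SparkSession.builder.appName("toy-somatic-scatter").getOrCreate()

    # Hypothetical shards: per-region variant calling is embarrassingly parallel.
    regions = [("chr1", 0, 50_000_000), ("chr1", 50_000_000, 100_000_000),
               ("chr2", 0, 50_000_000)]

    def call_region(region):
        chrom, start, end = region
        # Placeholder for running a caller (e.g., Mutect2) on one region.
        return (chrom, start, end, "ok")

    results = (spark.sparkContext
                    .parallelize(regions, len(regions))  # one partition per shard
                    .map(call_region)
                    .collect())
    print(results)
    spark.stop()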

CONCLUSIONS: To our knowledge, Halvade Somatic is the first somatic variant calling pipeline that leverages Big Data processing platforms and provides reliable, scalable performance. Source code is freely available.}, } @article {pmid35022620, year = {2022}, author = {Feldman, D and Funk, L and Le, A and Carlson, RJ and Leiken, MD and Tsai, F and Soong, B and Singh, A and Blainey, PC}, title = {Pooled genetic perturbation screens with image-based phenotypes.}, journal = {Nature protocols}, volume = {17}, number = {2}, pages = {476-512}, pmid = {35022620}, issn = {1750-2799}, support = {P50 HG006193/HG/NHGRI NIH HHS/United States ; R01 HG009283/HG/NHGRI NIH HHS/United States ; RM1 HG006193/HG/NHGRI NIH HHS/United States ; }, mesh = {Humans ; Gene Library ; Genetic Testing/methods ; High-Throughput Nucleotide Sequencing/methods ; Image Processing, Computer-Assisted/methods ; Lentivirus/genetics ; *Phenotype ; }, abstract = {Discovery of the genetic components underpinning fundamental and disease-related processes is being rapidly accelerated by combining efficient, programmable genetic engineering with phenotypic readouts of high spatial, temporal and/or molecular resolution. Microscopy is a fundamental tool for studying cell biology, but its lack of high-throughput sequence readouts hinders integration in large-scale genetic screens. Optical pooled screens using in situ sequencing provide massively scalable integration of barcoded lentiviral libraries (e.g., CRISPR perturbation libraries) with high-content imaging assays, including dynamic processes in live cells. The protocol uses standard lentiviral vectors and molecular biology, providing single-cell resolution of phenotype and engineered genotype, scalability to millions of cells and accurate sequence reads sufficient to distinguish >10[6] perturbations. In situ amplification takes ~2 d, while sequencing can be performed in ~1.5 h per cycle. The image analysis pipeline provided enables fully parallel automated sequencing analysis using a cloud or cluster computing environment.}, } @article {pmid35016766, year = {2022}, author = {Maniyar, CB and Kumar, A and Mishra, DR}, title = {Continuous and Synoptic Assessment of Indian Inland Waters for Harmful Algae Blooms.}, journal = {Harmful algae}, volume = {111}, number = {}, pages = {102160}, doi = {10.1016/j.hal.2021.102160}, pmid = {35016766}, issn = {1878-1470}, mesh = {*Cyanobacteria ; *Harmful Algal Bloom ; India ; Lakes/microbiology ; Water Quality ; }, abstract = {Cyanobacterial Harmful Algal Blooms (CyanoHABs) are progressively becoming a major water quality, socioeconomic, and health hazard worldwide. In India, there are frequent episodes of severe CyanoHABs, which are left untreated due to a lack of awareness and monitoring infrastructure, affecting the economy of the country gravely. In this study, for the first time, we present a country-wide analysis of CyanoHABs in India by developing a novel interactive cloud-based dashboard called "CyanoKhoj" in Google Earth Engine (GEE) which uses Sentinel-3 Ocean and Land Colour Instrument (OLCI) remotely sensed datasets. The main goal of this study was to showcase the utility of CyanoKhoj for rapid monitoring and discuss the widespread CyanoHABs problems across India. 
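A minimal Earth Engine sketch of the kind of Sentinel-3 OLCI compositing a CyanoKhoj-style dashboard rests on; the collection ID is the public GEE catalog entry, but the dates, location, and band ratio used as a chlorophyll-a proxy are illustrative assumptions, not the paper's published algorithm.

    import ee

    ee.Initialize()  # assumes an authenticated Earth Engine account

    # Sentinel-3 OLCI radiances over one waterbody, post-monsoon 2018.
    olci = (ee.ImageCollection("COPERNICUS/S3/OLCI")
              .filterDate("2018-09-01", "2018-11-30")
              .filterBounds(ee.Geometry.Point(83.87, 21.57)))  # approx. Hirakud Reservoir

    composite = olci.median()
    # Illustrative red-edge/red ratio as a crude chlorophyll-a proxy.
    chl_proxy = (composite.select("Oa11_radiance")
                          .divide(composite.select("Oa08_radiance"))
                          .rename("chl_proxy"))
    print(chl_proxy.bandNames().getInfo())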
We demonstrate the utility of CyanoKhoj by including select case studies of lakes and reservoirs geographically spread across five states: Bargi and Gandhisagar Dams in Madhya Pradesh, Hirakud Reservoir in Odisha, Ukai Dam in Gujarat, Linganamakki Reservoir in Karnataka, and Pulicat Lake in Tamil Nadu. These sites were studied from September to November 2018 using CyanoKhoj, which is capable of near-real-time monitoring and country-wide assessment of CyanoHABs. We used CyanoKhoj to prepare spatiotemporal maps of Chlorophyll-a (Chl-a) content and Cyanobacterial Cell Density (CCD) to study the local spread of the CyanoHABs and their phenology in these waterbodies. A first-ever all-India CCD map is also presented for the year 2018, which highlights the spatial spread of CyanoHABs throughout the country (32 large waterbodies across India with severe bloom: CCD>2,500,000). Results indicate that CyanoHABs are most prevalent in nutrient-rich waterbodies prone to industrial and other nutrient-rich discharges. A clear temporal evolution of the blooms showed that they are dominant during the post-monsoon season (September-October) when the nutrient concentrations in the waterbodies are at their peak, and they begin to decline towards winter (November-December). CyanoKhoj is an open-source tool that can have a significant broader impact in mapping CyanoHABs not only throughout cyanobacteria data-scarce India, but on a global level using archived and future Sentinel-3A/B OLCI data.}, } @article {pmid35013645, year = {2022}, author = {Saxena, D and Singh, AK}, title = {OFP-TM: an online VM failure prediction and tolerance model towards high availability of cloud computing environments.}, journal = {The Journal of supercomputing}, volume = {78}, number = {6}, pages = {8003-8024}, pmid = {35013645}, issn = {0920-8542}, abstract = {The indispensable collaboration of cloud computing in every digital service has raised its resource usage exponentially. The ever-growing demand for cloud resources erodes service availability, leading to critical challenges such as cloud outages, SLA violation, and excessive power consumption. Previous approaches have addressed this problem by utilizing multiple cloud platforms or running multiple replicas of a Virtual Machine (VM), resulting in high operational costs. This paper has addressed this alarming problem from a different perspective by proposing a novel Online virtual machine Failure Prediction and Tolerance Model (OFP-TM) with high availability awareness embedded in physical machines as well as virtual machines. The failure-prone VMs are estimated in real-time based on their future resource usage by developing an ensemble approach-based resource predictor. These VMs are assigned to a failure tolerance unit comprising a resource provision matrix and Selection Box (S-Box) mechanism, which triggers the migration of failure-prone VMs and handles any outage beforehand while maintaining the desired level of availability for cloud users. The proposed model is evaluated and compared against existing related approaches by simulating a cloud environment and executing several experiments using the real-world Google Cluster workload dataset.
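For the OFP-TM entry above, a toy version of flagging failure-prone VMs from predicted future usage, with a two-member ensemble forecaster; the capacity threshold and usage traces are invented for illustration.

    import numpy as np

    def predict_next(usage):
        """Ensemble of two cheap forecasters: last value and linear trend."""
        last = usage[-1]
        trend = usage[-1] + (usage[-1] - usage[-2])
        return float(np.mean([last, trend]))

    CAPACITY = 0.9  # assumed utilisation ceiling before a VM counts as failure-prone

    vms = {"vm1": [0.60, 0.70, 0.82, 0.95], "vm2": [0.30, 0.31, 0.29, 0.30]}
    failure_prone = [vm for vm, hist in vms.items()
                     if predict_next(np.array(hist)) > CAPACITY]
    print(failure_prone)  # candidates for proactive migration -> ['vm1']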
Consequently, it has been concluded that OFP-TM improves availability by up to 33.5% and scales down the number of live VM migrations by up to 83.3% relative to operation without OFP-TM.}, } @article {pmid35009941, year = {2022}, author = {Syed, SA and Rashid, M and Hussain, S and Azim, F and Zahid, H and Umer, A and Waheed, A and Zareei, M and Vargas-Rosales, C}, title = {QoS Aware and Fault Tolerance Based Software-Defined Vehicular Networks Using Cloud-Fog Computing.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {1}, pages = {}, pmid = {35009941}, issn = {1424-8220}, support = {MX009876//This project is supported by Tecnologico de Monterrey, School of Engineering and Sciences, Zapopan 45201, Mexico/ ; }, abstract = {Software-defined network (SDN) and vehicular ad-hoc network (VANET) combined provide a software-defined vehicular network (SDVN). To increase the quality of service (QoS) of vehicle communication and to make the overall process efficient, researchers are working on VANET communication systems. Current research work has made many strides, but due to the following limitations, it needs further investigation and research: cloud computing is used for message/task execution instead of fog computing, which increases response time. Furthermore, a fault tolerance mechanism is used to reduce the task/message failure ratio. We propose QoS-aware and fault-tolerance-based software-defined vehicular networks using cloud-fog computing (QAFT-SDVN) to address the above issues. We provide heuristic algorithms to solve the above limitations. The proposed model receives vehicle messages through SDN nodes placed on fog nodes. SDN controllers receive messages from nearby SDN units and prioritize the messages in two different ways: one by message nature, the other by message deadline and size. The SDN controller categorizes messages as safety or non-safety and forwards them to the destination. After sending messages to their destination, we check their acknowledgment; if the destination receives the messages, then no action is taken; otherwise, a fault tolerance mechanism resends the messages. The proposed model is implemented in CloudSim and iFogSim, and compared with the latest models. The results show that our proposed model decreased the response time of safety and non-safety messages by 50% by hosting SDN controllers on fog nodes. Furthermore, we reduced the execution time of safety and non-safety messages by up to 4%. Similarly, compared with the latest model, we reduced the task failure ratio by 20%, 15%, 23.3%, and 22.5%.}, } @article {pmid35009820, year = {2021}, author = {Loke, CH and Adam, MS and Nordin, R and Abdullah, NF and Abu-Samah, A}, title = {Physical Distancing Device with Edge Computing for COVID-19 (PADDIE-C19).}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {1}, pages = {}, pmid = {35009820}, issn = {1424-8220}, support = {FA2386-20-1-4045//United States Air Force Office of Scientific Research/ ; }, mesh = {Artificial Intelligence ; *COVID-19 ; Humans ; Masks ; *Physical Distancing ; SARS-CoV-2 ; }, abstract = {The most effective methods of preventing COVID-19 infection include maintaining physical distancing and wearing a face mask while in close contact with people in public places. However, densely populated areas have a greater incidence of COVID-19 dissemination, which is caused by people who do not comply with standard operating procedures (SOPs).
This paper presents a prototype called PADDIE-C19 (Physical Distancing Device with Edge Computing for COVID-19) to implement physical distancing monitoring based on a low-cost edge computing device. The PADDIE-C19 provides real-time results and responses, as well as notifications and warnings to anyone who violates the 1-m physical distance rule. In addition, PADDIE-C19 includes temperature screening using an MLX90614 thermometer and ultrasonic sensors to restrict the number of people on specified premises. The Neural Network Processor (KPU) in Grove Artificial Intelligence Hardware Attached on Top (AI HAT), an edge computing unit, is used to accelerate the neural network model on person detection and achieve up to 18 frames per second (FPS). The results show that the accuracy of person detection with Grove AI HAT could achieve 74.65% and the average absolute error between measured and actual physical distance is 8.95 cm. Furthermore, the MLX90614 thermometer is guaranteed to read within 0.5 °C of the more common Fluke 59 thermometer. Experimental results also proved that when cloud computing is compared to edge computing, the Grove AI HAT achieves the average performance of 18 FPS for a person detector (kmodel) with an average 56 ms execution time in different networks, regardless of the network connection type or speed.}, } @article {pmid35009814, year = {2021}, author = {Ojo, MO and Viola, I and Baratta, M and Giordano, S}, title = {Practical Experiences of a Smart Livestock Location Monitoring System Leveraging GNSS, LoRaWAN and Cloud Services.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {1}, pages = {}, pmid = {35009814}, issn = {1424-8220}, mesh = {Animals ; Cloud Computing ; Farms ; *Internet of Things ; *Livestock ; Monitoring, Physiologic ; }, abstract = {Livestock farming is, in most cases in Europe, unsupervised, thus making it difficult to ensure adequate control of the position of the animals for the improvement of animal welfare. In addition, the geographical areas involved in livestock grazing usually have difficult access with harsh orography and lack of communications infrastructure, thus the need to provide a low-power livestock localization and monitoring system is of paramount importance, which is crucial not only for sustainable agriculture, but also for the protection of native breeds and meats thanks to their controlled supervision. In this context, this work presents an Internet of things (IoT)-based system integrating low-power wide area (LPWA) technology, cloud, and virtualization services to provide real-time livestock location monitoring. Taking into account the constraints coming from the environment in terms of energy supply and network connectivity, our proposed system is based on a wearable device equipped with inertial sensors, Global Positioning System (GPS) receiver, and LoRaWAN transceiver, which can provide a satisfactory compromise between performance, cost, and energy consumption. First, this article reviews the state-of-the-art localization techniques and technologies applied to smart livestock. Then, we present the hardware and firmware co-design that achieves very low energy consumption, with a significant positive impact on battery life. The proposed platform has been evaluated in a pilot test in the northern part of Italy, evaluating different configurations in terms of sampling period, experimental duration, and number of devices.
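For the livestock-monitoring entry, a sketch of the kind of compact uplink payload a GPS-plus-LoRaWAN collar might use; the 9-byte layout is an assumption for illustration, not the authors' actual format.

    import struct

    def encode_fix(lat, lon, battery_pct):
        """Pack a GPS fix into 9 bytes: lat/lon as signed 1e-5-degree ints plus battery."""
        return struct.pack(">iiB", int(lat * 1e5), int(lon * 1e5), battery_pct)

    def decode_fix(payload):
        lat_i, lon_i, batt = struct.unpack(">iiB", payload)
        return lat_i / 1e5, lon_i / 1e5, batt

    payload = encode_fix(46.0103, 8.9601, 87)  # an invented fix in northern Italy
    print(len(payload), decode_fix(payload))   # 9 bytes keeps LoRaWAN airtime low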
The results are analyzed and discussed for packet delivery ratio, energy consumption, localization accuracy, battery discharge measurement, and delay.}, } @article {pmid35009770, year = {2021}, author = {Forcén-Muñoz, M and Pavón-Pulido, N and López-Riquelme, JA and Temnani-Rajjaf, A and Berríos, P and Morais, R and Pérez-Pastor, A}, title = {Irriman Platform: Enhancing Farming Sustainability through Cloud Computing Techniques for Irrigation Management.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {1}, pages = {}, pmid = {35009770}, issn = {1424-8220}, support = {PCIN-2017-091//Ministry of Economy, Industry and Competitiveness of Spain, National Research Agency (AEI)/ ; PID2019-106226RB-C22//National Research Agency (AEI) Spain/ ; }, mesh = {Agricultural Irrigation ; *Agriculture ; Climate Change ; *Cloud Computing ; Crops, Agricultural ; Farms ; }, abstract = {Crop sustainability is essential for balancing economic development and environmental care, mainly in strong and very competitive regions in the agri-food sector, such as the Region of Murcia in Spain, considered to be the orchard of Europe, despite being a semi-arid area with an important scarcity of fresh water. In this region, farmers apply efficient techniques to minimize supplies and maximize quality and productivity; however, the effects of climate change and the degradation of significant natural environments, such as the "Mar Menor", the most extensive saltwater lagoon in Europe, threatened by resource overexploitation, lead to the search for even better irrigation management techniques to avoid certain effects which could damage the quaternary aquifer connected to such lagoon. This paper describes the Irriman Platform, a system based on Cloud Computing techniques, which includes low-cost wireless data loggers, capable of acquiring data from a wide range of agronomic sensors, and a novel software architecture for safely storing and processing such information, making crop monitoring and irrigation management easier. The proposed platform helps agronomists to optimize irrigation procedures through a usable web-based tool which allows them to elaborate irrigation plans and to evaluate their effectiveness over crops. The system has been deployed in a large number of representative crops, covering nearly 50,000 ha, during several phenological cycles. Results demonstrate that the system enables crop monitoring and irrigation optimization, and makes interaction between farmers and agronomists easier.}, } @article {pmid35009740, year = {2021}, author = {Angel, NA and Ravindran, D and Vincent, PMDR and Srinivasan, K and Hu, YC}, title = {Recent Advances in Evolving Computing Paradigms: Cloud, Edge, and Fog Technologies.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {1}, pages = {}, pmid = {35009740}, issn = {1424-8220}, support = {MOST 110-2622-E-197-009//Ministry of Science and Technology/ ; }, abstract = {Cloud computing has become integral lately due to the ever-expanding Internet-of-things (IoT) network. It continues to be the best practice for implementing complex computational applications, emphasizing the massive processing of data. However, the cloud falls short due to the critical constraints of novel IoT applications generating vast data, which entails a swift response time with improved privacy. The newest drift is moving computational and storage resources to the edge of the network, involving a decentralized distributed architecture.
Data processing and analytics are performed in proximity to end-users, overcoming the bottleneck of cloud computing. The trend of deploying machine learning (ML) at the network edge to enhance computing applications and services has gained momentum lately, specifically to reduce latency and energy consumption while optimizing the security and management of resources. There is a need for rigorous research efforts oriented towards developing and implementing machine learning algorithms that deliver the best results in terms of speed, accuracy, storage, and security, with low power consumption. This extensive survey of the prominent computing paradigms in practice highlights the latest innovations resulting from the fusion between ML and the evolving computing paradigms and discusses the underlying open research challenges and future prospects.}, } @article {pmid35009652, year = {2021}, author = {Quezada-Gaibor, D and Torres-Sospedra, J and Nurmi, J and Koucheryavy, Y and Huerta, J}, title = {Cloud Platforms for Context-Adaptive Positioning and Localisation in GNSS-Denied Scenarios-A Systematic Review.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {1}, pages = {}, pmid = {35009652}, issn = {1424-8220}, support = {813278//H2020 Marie Skłodowska-Curie Actions/ ; 101023072//H2020 Marie Skłodowska-Curie Actions/ ; PTQ2018-009981//Ministerio de Ciencia, Innovación y Universidades/ ; }, abstract = {Cloud Computing and Cloud Platforms have become an essential resource for businesses, due to their advanced capabilities, performance, and functionalities. Data redundancy, scalability, and security are among the key features offered by cloud platforms. Location-Based Services (LBS) often exploit cloud platforms to host positioning and localisation systems. This paper introduces a systematic review of current positioning platforms for GNSS-denied scenarios. We have undertaken a comprehensive analysis of each component of the positioning and localisation systems, including techniques, protocols, standards, and cloud services used in the state-of-the-art deployments. Furthermore, this paper identifies the limitations of existing solutions, outlining shortcomings in areas that are rarely subjected to scrutiny in existing reviews of indoor positioning, such as computing paradigms, privacy, and fault tolerance. We then examine contributions in the areas of efficient computation, interoperability, positioning, and localisation. Finally, we provide a brief discussion concerning the challenges for cloud platforms based on GNSS-denied scenarios.}, } @article {pmid35009649, year = {2021}, author = {Ali, A and Iqbal, MM and Jamil, H and Akbar, H and Muthanna, A and Ammi, M and Althobaiti, MM}, title = {Multilevel Central Trust Management Approach for Task Scheduling on IoT-Based Mobile Cloud Computing.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {1}, pages = {}, pmid = {35009649}, issn = {1424-8220}, mesh = {Algorithms ; *Cloud Computing ; Computers, Handheld ; *Trust ; }, abstract = {With the increasing number of mobile devices and IoT devices across a wide range of real-life applications, mobile cloud computing will soon be unable to cope with this growing audience, which demands a shift to fog computing. Task scheduling is one of the most demanding problems after trust computation among trustable nodes. Mobile and IoT devices offload their resource-intensive tasks to mobile cloud computing.
Some tasks are resource-intensive and cannot be trusted to mobile cloud computing resources. This consequently gives rise to trust evaluation and data sync-up of devices joining and leaving the network. Resource demands are even more intensive for cloud computing and mobile cloud computing. Time, energy, and resources are wasted due to nontrustable nodes. This research article proposes a multilevel trust enhancement approach for efficient task scheduling in mobile cloud environments. We first identify the trustable tasks to be offloaded to mobile cloud computing. Then, an efficient and dynamic scheduler is added to enhance the task scheduling after trust computation using social and environmental trust computation techniques. To improve the time and energy efficiency of IoT and mobile devices using the proposed technique, the energy computation and time request computation are compared with existing methods from the literature, showing improved results. Our proposed approach is centralized in order to handle the constant sync-up of incoming devices' trust values with mobile cloud computing. With the benefits of mobile cloud computing, the centralized data distribution method is a positive approach.}, } @article {pmid35009609, year = {2021}, author = {Rocha-Jácome, C and Carvajal, RG and Chavero, FM and Guevara-Cabezas, E and Hidalgo Fort, E}, title = {Industry 4.0: A Proposal of Paradigm Organization Schemes from a Systematic Literature Review.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {1}, pages = {}, pmid = {35009609}, issn = {1424-8220}, support = {PID2019-107258RB-C31//AEI/FEDER/ ; 802C2000003//Agencia IDEA/ ; }, mesh = {Bibliometrics ; Humans ; *Industry ; *Technology ; }, abstract = {Currently, the concept of Industry 4.0 is well known; however, it is extremely complex, as it is constantly evolving and innovating. It includes the participation of many disciplines and areas of knowledge as well as the integration of many technologies, both mature and emerging, but working in collaboration and relying on their study and implementation under the novel criteria of Cyber-Physical Systems. This study starts with an exhaustive search for up-to-date scientific information, on which a bibliometric analysis is carried out, with results presented in different tables and graphs. Subsequently, based on the qualitative analysis of the references, we present two proposals for the schematic analysis of Industry 4.0 that will help academia and companies to support digital transformation studies. The results will allow us to perform a simple alternative analysis of Industry 4.0 to understand the functions and scope of the integrating technologies to achieve a better collaboration of each area of knowledge and each professional, considering the potential and limitations of each one, supporting the planning of an appropriate strategy, especially in the management of human resources, for the successful execution of the digital transformation of the industry.}, } @article {pmid35005616, year = {2021}, author = {Goudarzi, A and Moya-Galé, G}, title = {Automatic Speech Recognition in Noise for Parkinson's Disease: A Pilot Study.}, journal = {Frontiers in artificial intelligence}, volume = {4}, number = {}, pages = {809321}, pmid = {35005616}, issn = {2624-8212}, abstract = {The sophistication of artificial intelligence (AI) technologies has significantly advanced in the past decade.
However, the observed unpredictability and variability of AI behavior in noisy signals is still underexplored and represents a challenge when trying to generalize AI behavior to real-life environments, especially for people with a speech disorder, who already experience reduced speech intelligibility. In the context of developing assistive technology for people with Parkinson's disease using automatic speech recognition (ASR), this pilot study reports on the performance of Google Cloud speech-to-text technology with dysarthric and healthy speech in the presence of multi-talker babble noise at different intensity levels. Despite sensitivities and shortcomings, it is possible to control the performance of these systems with current tools in order to measure speech intelligibility in real-life conditions.}, } @article {pmid35003323, year = {2021}, author = {Almusallam, N and Alabdulatif, A and Alarfaj, F}, title = {Analysis of Privacy-Preserving Edge Computing and Internet of Things Models in Healthcare Domain.}, journal = {Computational and mathematical methods in medicine}, volume = {2021}, number = {}, pages = {6834800}, pmid = {35003323}, issn = {1748-6718}, mesh = {Cloud Computing ; Computational Biology ; *Computer Security ; *Delivery of Health Care ; Electronic Health Records ; Humans ; *Internet of Things ; *Privacy ; }, abstract = {The healthcare sector is rapidly being transformed to one that operates in new computing environments. With researchers increasingly committed to finding and expanding healthcare solutions to include the Internet of Things (IoT) and edge computing, there is a need to monitor more closely than ever the data being collected, shared, processed, and stored. The advent of cloud, IoT, and edge computing paradigms poses huge risks towards the privacy of data, especially, in the healthcare environment. However, there is a lack of comprehensive research focused on seeking efficient and effective solutions that ensure data privacy in the healthcare domain. The data being collected and processed by healthcare applications is sensitive, and its manipulation by malicious actors can have catastrophic repercussions. This paper discusses the current landscape of privacy-preservation solutions in IoT and edge healthcare applications. It describes the common techniques adopted by researchers to integrate privacy in their healthcare solutions. Furthermore, the paper discusses the limitations of these solutions in terms of their technical complexity, effectiveness, and sustainability. The paper closes with a summary and discussion of the challenges of safeguarding privacy in IoT and edge healthcare solutions which need to be resolved for future applications.}, } @article {pmid35002704, year = {2021}, author = {Wang, S and Hou, Y and Li, X and Meng, X and Zhang, Y and Wang, X}, title = {Practical Implementation of Artificial Intelligence-Based Deep Learning and Cloud Computing on the Application of Traditional Medicine and Western Medicine in the Diagnosis and Treatment of Rheumatoid Arthritis.}, journal = {Frontiers in pharmacology}, volume = {12}, number = {}, pages = {765435}, pmid = {35002704}, issn = {1663-9812}, abstract = {Rheumatoid arthritis (RA), an autoimmune disease of unknown etiology, is a serious threat to the health of middle-aged and elderly people. 
Although western medicine, traditional medicine such as traditional Chinese medicine, Tibetan medicine and other ethnic medicine have shown certain advantages in the diagnosis and treatment of RA, there are still some practical shortcomings, such as delayed diagnosis, improper treatment schemes and unclear drug mechanisms. At present, the applications of artificial intelligence (AI)-based deep learning and cloud computing have aroused wide attention in the medical and health field, especially in screening potential active ingredients, targets and action pathways of single drugs or prescriptions in traditional medicine and optimizing disease diagnosis and treatment models. Integrated information and analysis of RA patients based on AI and medical big data will unquestionably benefit more RA patients worldwide. In this review, we mainly elaborate the application status and prospects of AI-assisted deep learning and cloud computation-oriented western medicine and traditional medicine on the diagnosis and treatment of RA in different stages. It can be predicted that with the help of AI, more pharmacological mechanisms of effective ethnic drugs against RA will be elucidated and more accurate solutions will be provided for the treatment and diagnosis of RA in the future.}, } @article {pmid35002664, year = {2021}, author = {Bai, Y and Liu, Q and Wu, W and Feng, Y}, title = {cuSCNN: A Secure and Batch-Processing Framework for Privacy-Preserving Convolutional Neural Network Prediction on GPU.}, journal = {Frontiers in computational neuroscience}, volume = {15}, number = {}, pages = {799977}, pmid = {35002664}, issn = {1662-5188}, abstract = {The emerging topic of privacy-preserving deep learning as a service has attracted increasing attention in recent years, which focuses on building an efficient and practical neural network prediction framework to secure client and model-holder data privately on the cloud. In such a task, the time cost of performing the secure linear layers is expensive, where matrix multiplication is the atomic operation. Most existing mix-based solutions heavily emphasized employing BGV-based homomorphic encryption schemes to secure the linear layer on the CPU platform. However, they suffer an efficiency and energy loss when dealing with a larger-scale dataset, due to the complicated encoding methods and intractable ciphertext operations. To address it, we propose cuSCNN, a secure and efficient framework to perform the privacy prediction task of a convolutional neural network (CNN), which can run flexibly on the GPU platform. Its main idea is twofold: (1) To avoid the tedious and complicated homomorphic matrix computations brought by BGV-based solutions, it adopts GSW-based homomorphic matrix encryption to efficiently enable the linear layers of CNN. (2) To improve the computation efficiency on GPU, a hybrid optimization approach based on CUDA (Compute Unified Device Architecture) has been proposed to improve the parallelism level and memory access speed when performing the matrix multiplication on GPU.
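As a hedged illustration of the batching idea in the cuSCNN entry, stripped of the homomorphic-encryption layer entirely, the sketch below performs one batched on-device matrix multiplication with CuPy instead of many separate host round-trips; shapes are arbitrary and a CUDA-capable GPU is assumed.

    import cupy as cp  # assumes CuPy and a CUDA-capable GPU are available

    # A batch of 64 matrix products evaluated in a single fused GPU call.
    a = cp.random.rand(64, 128, 128, dtype=cp.float32)
    b = cp.random.rand(64, 128, 128, dtype=cp.float32)

    c = cp.matmul(a, b)  # batched matrix multiplication on the device
    cp.cuda.Stream.null.synchronize()  # wait for the kernel to finish
    print(c.shape)  # (64, 128, 128)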
Extensive experiments conducted on industrial datasets show the superior performance of the proposed cuSCNN framework in terms of runtime and power consumption compared to the other frameworks.}, } @article {pmid35002476, year = {2022}, author = {Zhu, L and Wang, C and He, Z and Zhang, Y}, title = {A lightweight automatic sleep staging method for children using single-channel EEG based on edge artificial intelligence.}, journal = {World wide web}, volume = {25}, number = {5}, pages = {1883-1903}, pmid = {35002476}, issn = {1573-1413}, abstract = {With the development of telemedicine and edge computing, edge artificial intelligence (AI) will become a new development trend for smart medicine. On the other hand, nearly one-third of children suffer from sleep disorders. However, all existing sleep staging methods are for adults. Therefore, we adapted edge AI to develop a lightweight automatic sleep staging method for children using single-channel EEG. The trained sleep staging model will be deployed to edge smart devices so that sleep staging can be implemented on edge devices, which greatly saves network resources and improves the performance and privacy of the sleep staging application. Then the results and hypnogram will be uploaded to the cloud server for further analysis by the physicians to get sleep disease diagnosis reports and treatment opinions. We utilized 1D convolutional neural networks (1D-CNN) and long short-term memory (LSTM) to build our sleep staging model, named CSleepNet. We tested the model on our children's sleep (CS) dataset and the Sleep-EDFX dataset. For the CS dataset, we experimented with F4-M1 channel EEG using four different loss functions, and the logcosh performed best with overall accuracy of 83.06% and F1-score of 76.50%. We used Fpz-Cz and Pz-Oz channel EEG to train our model on the Sleep-EDFX dataset, and achieved an accuracy of 86.41% without manual feature extraction. The experimental results show that our method has great potential. It not only plays an important role in sleep-related research, but also can be widely used in the classification of other time-series physiological signals.}, } @article {pmid35002010, year = {2022}, author = {Peng, Y and Liu, E and Peng, S and Chen, Q and Li, D and Lian, D}, title = {Using artificial intelligence technology to fight COVID-19: a review.}, journal = {Artificial intelligence review}, volume = {55}, number = {6}, pages = {4941-4977}, pmid = {35002010}, issn = {0269-2821}, abstract = {In late December 2019, a new type of coronavirus was discovered, which was later named severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2). Since its discovery, the virus has spread globally, with 2,975,875 deaths as of 15 April 2021, and has had a huge impact on our health systems and economy. How to suppress the continued spread of novel coronavirus pneumonia is the main task of many scientists and researchers. The introduction of artificial intelligence technology has made a huge contribution to the suppression of the new coronavirus. This article discusses the main application of artificial intelligence technology in the suppression of coronavirus from three major aspects of identification, prediction, and development through a large amount of literature research, and puts forward the current main challenges and possible development directions.
The results show that it is an effective measure to combine artificial intelligence technology with a variety of new technologies to predict and identify COVID-19 patients.}, } @article {pmid34999074, year = {2022}, author = {Elnashar, A and Zeng, H and Wu, B and Gebremicael, TG and Marie, K}, title = {Assessment of environmentally sensitive areas to desertification in the Blue Nile Basin driven by the MEDALUS-GEE framework.}, journal = {The Science of the total environment}, volume = {815}, number = {}, pages = {152925}, doi = {10.1016/j.scitotenv.2022.152925}, pmid = {34999074}, issn = {1879-1026}, mesh = {Climate ; *Conservation of Natural Resources ; *Soil ; }, abstract = {Assessing environmentally sensitive areas (ESA) to desertification and understanding their primary drivers are necessary for applying targeted management practices to combat land degradation at the basin scale. We have developed the MEditerranean Desertification And Land Use framework in the Google Earth Engine cloud platform (MEDALUS-GEE) to map and assess the ESA index at 300 m grids in the Blue Nile Basin (BNB). The ESA index was derived by elaborating 19 key indicators representing soil, climate, vegetation, and management through the geometric mean of their sensitivity scores. The results showed that 43.4%, 28.8%, and 70.4% of the entire BNB, Upper BNB, and Lower BNB, respectively, are highly susceptible to desertification, indicating that appropriate land and water management measures should be urgently implemented. Our findings also showed that the main land degradation drivers are moderate to intensive cultivation across the BNB, high slope gradient and water erosion in the Upper BNB, and low soil organic matter and vegetation cover in the Lower BNB. The study presented an integrated monitoring and assessment framework for understanding desertification processes to help achieve land-related sustainable development goals.}, } @article {pmid34997109, year = {2022}, author = {Alrebdi, N and Alabdulatif, A and Iwendi, C and Lian, Z}, title = {SVBE: searchable and verifiable blockchain-based electronic medical records system.}, journal = {Scientific reports}, volume = {12}, number = {1}, pages = {266}, pmid = {34997109}, issn = {2045-2322}, support = {501100007414//Qassim University (QU)/ ; 501100007414//Qassim University (QU)/ ; }, abstract = {Central management of electronic medical systems faces a major challenge because it requires trust in a single entity that cannot effectively protect files from unauthorized access or attacks. This challenge makes it difficult to provide some services in central electronic medical systems, such as file search and verification, although they are needed. This gap motivated us to develop a system based on blockchain that has several characteristics: decentralization, security, anonymity, immutability, and tamper resistance. The proposed system provides several services: storage, verification, and search. The system consists of a smart contract that connects to a decentralized user application through which users can transact with the system. In addition, the system uses an interplanetary file system (IPFS) and cloud computing to store patients' data and files.
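For the MEDALUS-GEE entry above, the ESA index is described as a geometric mean of indicator sensitivity scores; a minimal worked sketch with invented scores for the four quality layers follows.

    from math import prod

    def esa_index(scores):
        """Geometric mean of sensitivity scores: (s1 * s2 * ... * sn) ** (1/n)."""
        return prod(scores) ** (1.0 / len(scores))

    # Invented sensitivity scores (1 = least sensitive, 2 = most sensitive).
    soil, climate, vegetation, management = 1.6, 1.4, 1.7, 1.3
    print(round(esa_index([soil, climate, vegetation, management]), 3))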
Experimental results and system security analysis show that the system performs search and verification tasks securely and quickly through the network.}, } @article {pmid34996614, year = {2022}, author = {Li, L and Zhang, Y and Geng, Q}, title = {Mean-square bounded consensus of nonlinear multi-agent systems under deception attack.}, journal = {ISA transactions}, volume = {129}, number = {Pt A}, pages = {91-101}, doi = {10.1016/j.isatra.2021.12.020}, pmid = {34996614}, issn = {1879-2022}, abstract = {This paper studies mean-square bounded consensus for a nonlinear multi-agent system subjected to randomly occurring deception attacks and process and measurement noises. Considering that measurements may be tampered with by the attacker, an estimator is presented to obtain relatively accurate state estimates, where the gain is acquired by a recursive algorithm. On this basis, a centralized controller is designed in combination with a cloud computing system. Moreover, from the perspective of the defender, a detector is proposed on the agent side to detect whether the current actuator input is attacked. Using linear matrix inequality, sufficient conditions are given for achieving mean-square bounded consensus and an upper bound is derived. Finally, the validity of the proposed method is illustrated via two simulation examples.}, } @article {pmid34989688, year = {2022}, author = {Cresswell, K and Domínguez Hernández, A and Williams, R and Sheikh, A}, title = {Key Challenges and Opportunities for Cloud Technology in Health Care: Semistructured Interview Study.}, journal = {JMIR human factors}, volume = {9}, number = {1}, pages = {e31246}, pmid = {34989688}, issn = {2292-9495}, abstract = {BACKGROUND: The use of cloud computing (involving storage and processing of data on the internet) in health care has increasingly been highlighted as having great potential in facilitating data-driven innovations. Although some provider organizations are reaping the benefits of using cloud providers to store and process their data, others are lagging behind.

OBJECTIVE: We aim to explore the existing challenges and barriers to the use of cloud computing in health care settings and investigate how perceived risks can be addressed.

METHODS: We conducted a qualitative case study of cloud computing in health care settings, interviewing a range of individuals with perspectives on supply, implementation, adoption, and integration of cloud technology. Data were collected through a series of in-depth semistructured interviews exploring current applications, implementation approaches, challenges encountered, and visions for the future. The interviews were transcribed and thematically analyzed using NVivo 12 (QSR International). We coded the data based on a sociotechnical coding framework developed in related work.

RESULTS: We interviewed 23 individuals between September 2020 and November 2020, including professionals working across major cloud providers, health care provider organizations, innovators, small and medium-sized software vendors, and academic institutions. The participants were united by a common vision of a cloud-enabled ecosystem of applications and by drivers surrounding data-driven innovation. The identified barriers to progress included the cost of data migration and skill gaps to implement cloud technologies within provider organizations, the cultural shift required to move to externally hosted services, a lack of user pull as many benefits were not visible to those providing frontline care, and a lack of interoperability standards and central regulations.

CONCLUSIONS: Implementations need to be viewed as a digitally enabled transformation of services, driven by skill development, organizational change management, and user engagement, to facilitate the implementation and exploitation of cloud-based infrastructures and to maximize returns on investment.}, } @article {pmid34989198, year = {2022}, author = {Fang, Q and Yan, S}, title = {MCX Cloud-a modern, scalable, high-performance and in-browser Monte Carlo simulation platform with cloud computing.}, journal = {Journal of biomedical optics}, volume = {27}, number = {8}, pages = {}, pmid = {34989198}, issn = {1560-2281}, support = {R01 EB026998/EB/NIBIB NIH HHS/United States ; R01 GM114365/GM/NIGMS NIH HHS/United States ; U24 NS124027/NS/NINDS NIH HHS/United States ; }, mesh = {*Cloud Computing ; Computer Simulation ; Computers ; Monte Carlo Method ; *Software ; }, abstract = {SIGNIFICANCE: Despite the ample progress made toward faster and more accurate Monte Carlo (MC) simulation tools over the past decade, the limited usability and accessibility of these advanced modeling tools remain key barriers to widespread use among the broad user community.

AIM: An open-source, high-performance, web-based MC simulator that builds upon modern cloud computing architectures is highly desirable to deliver state-of-the-art MC simulations and hardware acceleration to general users without the need for special hardware installation and optimization.

APPROACH: We have developed a configuration-free, in-browser 3D MC simulation platform-Monte Carlo eXtreme (MCX) Cloud-built upon an array of robust and modern technologies, including a Docker Swarm-based cloud-computing backend and a web-based graphical user interface (GUI) that supports in-browser 3D visualization, asynchronous data communication, and automatic data validation via JavaScript Object Notation (JSON) schemas.
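A minimal sketch of JSON-schema-based request validation in the spirit of the APPROACH paragraph above; the schema fields (nphoton, session) are illustrative assumptions, not MCX's actual input specification.

    import jsonschema

    # Illustrative schema: a job must carry a session name and a positive photon count.
    schema = {
        "type": "object",
        "properties": {
            "nphoton": {"type": "integer", "minimum": 1},
            "session": {"type": "string"},
        },
        "required": ["nphoton", "session"],
    }

    job = {"nphoton": 100000, "session": "demo"}
    jsonschema.validate(instance=job, schema=schema)  # raises ValidationError if invalid
    print("job accepted")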

RESULTS: The front-end of the MCX Cloud platform offers an intuitive simulation design, fast 3D data rendering, and convenient simulation sharing. The Docker Swarm container orchestration backend is highly scalable and can support high-demand GPU MC simulations using MCX over a dynamically expandable virtual cluster.
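A hedged sketch of the elasticity idea behind a Docker Swarm backend, using the Docker SDK for Python to scale a hypothetical worker service with queue depth; the service name and replica policy are assumptions, and a reachable Swarm manager is required.

    import docker

    client = docker.from_env()  # talks to the local Docker/Swarm daemon

    def scale_workers(service_name, pending_jobs, jobs_per_replica=4):
        service = client.services.get(service_name)
        replicas = max(1, -(-pending_jobs // jobs_per_replica))  # ceiling division
        service.scale(replicas)  # Swarm reschedules containers across the cluster
        return replicas

    print(scale_workers("mcx-worker", pending_jobs=10))  # -> 3 replicas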

CONCLUSION: MCX Cloud makes fast, scalable, and feature-rich MC simulations readily available to all biophotonics researchers without overhead. It is fully open-source and can be freely accessed at http://mcx.space/cloud.}, } @article {pmid34987566, year = {2021}, author = {Alsuhibany, SA and Abdel-Khalek, S and Algarni, A and Fayomi, A and Gupta, D and Kumar, V and Mansour, RF}, title = {Ensemble of Deep Learning Based Clinical Decision Support System for Chronic Kidney Disease Diagnosis in Medical Internet of Things Environment.}, journal = {Computational intelligence and neuroscience}, volume = {2021}, number = {}, pages = {4931450}, pmid = {34987566}, issn = {1687-5273}, mesh = {*Decision Support Systems, Clinical ; *Deep Learning ; Humans ; *Internet of Things ; Neural Networks, Computer ; *Renal Insufficiency, Chronic/diagnosis ; }, abstract = {Recently, Internet of Things (IoT) and cloud computing environments have become commonly employed in several healthcare applications by the integration of monitoring things such as sensors and medical gadgets for observing remote patients. To deliver improved healthcare services, the huge volume of data generated by IoT gadgets in the medical field can be analyzed in the cloud computing (CC) environment rather than relying on limited processing and storage resources. At the same time, early identification of chronic kidney disease (CKD) becomes essential to reduce the mortality rate significantly. This study develops an ensemble of deep learning based clinical decision support systems (EDL-CDSS) for CKD diagnosis in the IoT environment. The goal of the EDL-CDSS technique is to detect and classify different stages of CKD using the medical data collected by IoT devices and benchmark repositories. In addition, the EDL-CDSS technique involves the design of the Adaptive Synthetic (ADASYN) technique for the outlier detection process. Moreover, an ensemble of three models, namely, deep belief network (DBN), kernel extreme learning machine (KELM), and convolutional neural network with gated recurrent unit (CNN-GRU), is employed. Finally, a quasi-oppositional butterfly optimization algorithm (QOBOA) is used for the hyperparameter tuning of the DBN and CNN-GRU models. A wide range of simulations was carried out and the outcomes are studied in terms of distinct measures. A brief outcomes analysis highlighted the supremacy of the EDL-CDSS technique over existing approaches.}, } @article {pmid34983991, year = {2022}, author = {Perkel, JM}, title = {Terra takes the pain out of 'omics' computing in the cloud.}, journal = {Nature}, volume = {601}, number = {7891}, pages = {154-155}, doi = {10.1038/d41586-021-03822-7}, pmid = {34983991}, issn = {1476-4687}, mesh = {Aging/genetics ; Animals ; Biomedical Research ; *Cloud Computing/economics ; Datasets as Topic ; Dogs ; Genome/genetics ; Genomics/economics/*methods ; Humans ; Information Dissemination/*methods ; Internet ; Multicenter Studies as Topic/*methods ; National Human Genome Research Institute (U.S.) ; Pets/genetics ; *Software/economics ; United States ; Workflow ; }, } @article {pmid34978034, year = {2022}, author = {Ha, LT}, title = {Are digital business and digital public services a driver for better energy security?
Evidence from a European sample.}, journal = {Environmental science and pollution research international}, volume = {29}, number = {18}, pages = {27232-27256}, pmid = {34978034}, issn = {1614-7499}, mesh = {*Commerce ; }, abstract = {This paper empirically analyses the impacts of the digital transformation process in the business and public sectors on energy security (ES). We employ 8 indicators to represent four aspects of energy security, including availability, acceptability, develop-ability, and sustainability. Digital business development is captured by e-Commerce (including e-Commerce sales, e-Commerce turnover, e-Commerce web sales) and e-Business (including customer relationship management (CRM) usage and cloud usage). Digital public services development is reflected by business mobility and key enablers. Different econometric techniques are utilized in a database of 24 European Union countries from 2011 to 2019. Our estimation results demonstrate that digital businesses play a critical role in improving the acceptability and develop-ability of energy security, while digitalization in public services supports achieving energy sustainability goals. The use of modern digital technologies such as big data and cloud computing is extremely important to ensure the security of the energy system, especially the availability of energy. For further discussion on the role of digital public services, we reveal a nonlinear association between digitalization in the public sector and energy intensity and energy consumption, suggesting the acceptability and develop-ability of energy security can be enhanced if the digital transformation process achieves a certain level.}, } @article {pmid34976326, year = {2021}, author = {Wang, B and Xu, L}, title = {Construction of the "Internet Plus" Community Smart Elderly Care Service Platform.}, journal = {Journal of healthcare engineering}, volume = {2021}, number = {}, pages = {4310648}, pmid = {34976326}, issn = {2040-2309}, mesh = {Aged ; *Aging ; Humans ; *Internet ; }, abstract = {With the rapid development of China's market economy and the increasing trend of population aging, the traditional community elderly care service model has exposed more and more problems, such as the imbalance between supply and demand, limited service variety, and lack of flexibility. In response to these issues, this research attempts to explore the possible paths and practical challenges of applying the Internet, Internet of Things, mobile networks, big data, and cloud computing to community elderly care services. This research argues that the construction of the "Internet Plus" community smart elderly care services platform is a general trend. Innovating the traditional community elderly care service model is conducive to fully integrating elderly care resources and improving the quality of elderly care services.}, } @article {pmid34976046, year = {2021}, author = {Abd Elaziz, M and Abualigah, L and Ibrahim, RA and Attiya, I}, title = {IoT Workflow Scheduling Using Intelligent Arithmetic Optimization Algorithm in Fog Computing.}, journal = {Computational intelligence and neuroscience}, volume = {2021}, number = {}, pages = {9114113}, pmid = {34976046}, issn = {1687-5273}, mesh = {Algorithms ; Cloud Computing ; Humans ; *Internet of Things ; Workflow ; }, abstract = {Instead of the cloud, Internet of Things (IoT) activities are offloaded to fog computing to boost the quality of services (QoSs) needed by many applications.
However, the availability of continuous computing resources on fog computing servers is one of the restrictions for IoT applications since transmitting the large amount of data generated using IoT devices would create network traffic and cause an increase in computational overhead. Therefore, task scheduling is the main problem that needs to be solved efficiently. This study proposes an energy-aware model using an enhanced arithmetic optimization algorithm (AOA) method called AOAM, which addresses fog computing's job scheduling problem to maximize users' QoS by minimizing the makespan measure. In the proposed AOAM, we enhance the conventional AOA's search capability using marine predators algorithm (MPA) search operators to improve solution diversity and avoid local optima. The proposed AOAM is validated using several parameters, including various clients, data centers, hosts, virtual machines, tasks, and standard evaluation measures, including energy and makespan. The obtained results are compared with other state-of-the-art methods and show that AOAM is promising and solves task scheduling effectively.}, } @article {pmid34972098, year = {2021}, author = {Xiao, Y and Wang, K and Liu, W and Peng, K and Wan, F}, title = {Research on rapier loom fault system based on cloud-side collaboration.}, journal = {PloS one}, volume = {16}, number = {12}, pages = {e0260888}, pmid = {34972098}, issn = {1932-6203}, mesh = {*Algorithms ; Bayes Theorem ; *Cloud Computing ; Neural Networks, Computer ; *Textiles ; }, abstract = {The electrical control system of rapier weaving machines is susceptible to various disturbances during operation and is prone to failures. This seriously affects production, and a fault diagnosis system is needed to reduce this effect. However, the existing popular fault diagnosis systems and methods need to be improved due to the limitations of the rapier weaving machine's process and electrical characteristics. Based on this, this paper presents an in-depth study of the rapier loom fault diagnosis system and proposes a fault diagnosis method combining an edge expert system with a cloud-based rough set and Bayesian network. By analyzing the process and fault characteristics of the rapier loom, the loom's electrical faults are classified into common faults and other faults according to the frequency of occurrence. An expert system built on accumulated fault diagnosis knowledge runs in the field for edge computing, diagnosing common loom faults and reducing the computing pressure on the cloud. Loom fault data are collected in the cloud, and fault diagnosis algorithms are trained there to diagnose the other faults that the edge expert system cannot handle. The effectiveness of loom fault diagnosis is verified by on-site operation and remote monitoring of the loom human-machine interaction system.
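For the rapier-loom entry, a toy version of the edge/cloud split it describes: common faults are matched against an on-site rule table, and everything else is deferred to the cloud-side diagnoser; the rules and fault codes are invented.

    # Invented edge rule table for the frequent, well-understood faults.
    EDGE_RULES = {
        "weft_break": "stop loom, rethread weft feeder",
        "overcurrent": "check drive motor and power supply",
    }

    def diagnose(fault_code, send_to_cloud):
        """Handle common faults at the edge; escalate the rest to the cloud model."""
        if fault_code in EDGE_RULES:
            return "edge: " + EDGE_RULES[fault_code]
        return "cloud: " + send_to_cloud(fault_code)

    # Stand-in for the cloud-side rough-set/Bayesian-network diagnoser.
    def cloud_stub(code):
        return "queued for analysis of " + code

    print(diagnose("weft_break", cloud_stub))
    print(diagnose("sensor_drift", cloud_stub))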
Technical examples are provided for research on loom fault diagnosis systems.}, } @article {pmid34960506, year = {2021}, author = {Lee, S and Yoon, D and Yeo, S and Oh, S}, title = {Mitigating Cold Start Problem in Serverless Computing with Function Fusion.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {24}, pages = {}, pmid = {34960506}, issn = {1424-8220}, support = {UD190033ED//Agency for Defense Development/ ; }, mesh = {*Artificial Intelligence ; *Software ; Workflow ; }, abstract = {As Artificial Intelligence (AI) is becoming ubiquitous in many applications, serverless computing is also emerging as a building block for developing cloud-based AI services. Serverless computing has received much interest because of its simplicity, scalability, and resource efficiency. However, due to the trade-off with resource efficiency, serverless computing suffers from the cold start problem, that is, a latency between a request arrival and function execution. The cold start problem significantly influences the overall response time of a workflow that consists of functions because the cold start may occur in every function within the workflow. Function fusion can be one of the solutions to mitigate the cold start latency of a workflow. If two functions are fused into a single function, the cold start of the second function is removed; however, if parallel functions are fused, the workflow response time can be increased because the parallel functions run sequentially even if the cold start latency is reduced. This study presents an approach to mitigate the cold start latency of a workflow using function fusion while considering a parallel run. First, we identify three latencies that affect response time, present a workflow response time model considering these latencies, and efficiently find a fusion solution that can optimize the response time on the cold start. Our method shows a response time of 28-86% of the response time of the original workflow in five workflows.}, } @article {pmid34960483, year = {2021}, author = {Salih, S and Hamdan, M and Abdelmaboud, A and Abdelaziz, A and Abdelsalam, S and Althobaiti, MM and Cheikhrouhou, O and Hamam, H and Alotaibi, F}, title = {Prioritising Organisational Factors Impacting Cloud ERP Adoption and the Critical Issues Related to Security, Usability, and Vendors: A Systematic Literature Review.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {24}, pages = {}, pmid = {34960483}, issn = {1424-8220}, mesh = {*Cloud Computing ; *Commerce ; }, abstract = {Cloud ERP is a type of enterprise resource planning (ERP) system that runs on the vendor's cloud platform instead of an on-premises network, enabling companies to connect through the Internet. The goal of this study was to rank and prioritise the factors driving cloud ERP adoption by organisations and to identify the critical issues in terms of security, usability, and vendors that impact adoption of cloud ERP systems. The assessment of critical success factors (CSFs) in on-premises ERP adoption and implementation has been well documented; however, no previous research has been carried out on CSFs in cloud ERP adoption. Therefore, the contribution of this research is to provide research and practice with the identification and analysis of 16 CSFs through a systematic literature review, where 73 publications on cloud ERP adoption were assessed from a range of different conferences and journals, using inclusion and exclusion criteria.
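For the function-fusion entry above (pmid34960506), a back-of-envelope model of the trade-off it describes: fusing a sequential chain removes cold starts, while fusing parallel branches serialises them; the latency numbers are illustrative only.

    COLD_START = 0.3  # assumed per-function cold-start latency in seconds

    def sequential(durations, fused):
        """Sequential chain: one cold start if fused, one per function otherwise."""
        starts = 1 if fused else len(durations)
        return starts * COLD_START + sum(durations)

    def parallel(durations, fused):
        """Parallel fan-out: fusion removes cold starts but serialises the branches."""
        if fused:
            return COLD_START + sum(durations)
        return COLD_START + max(d + COLD_START for d in durations)

    print(sequential([0.5, 0.5], fused=True))   # 1.3 s vs 1.6 s unfused: fusion wins
    print(parallel([0.5, 0.5], fused=True))     # 1.3 s vs 1.1 s unfused: fusion can lose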
Drawing from the literature, we found security, usability, and vendors were the top three most widely cited critical issues for the adoption of cloud-based ERP; hence, the second contribution of this study was an integrative model constructed with 12 drivers based on the security, usability, and vendor characteristics that may have greater influence as the top critical issues in the adoption of cloud ERP systems. We also identified critical gaps in current research, such as the inconclusiveness of findings related to security critical issues, usability critical issues, and vendor critical issues, by highlighting the most important drivers influencing those issues in cloud ERP adoption and the lack of discussion on the nature of the criticality of those CSFs. This research will aid in the development of new strategies or the revision of existing strategies and policies aimed at effectively integrating cloud ERP into cloud computing infrastructure. It will also allow cloud ERP suppliers to determine organisations' and business owners' expectations and implement appropriate tactics. A better understanding of the CSFs will narrow the field of failure and assist practitioners and managers in increasing their chances of success.}, } @article {pmid34960455, year = {2021}, author = {Bucur, V and Miclea, LC}, title = {Multi-Cloud Resource Management Techniques for Cyber-Physical Systems.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {24}, pages = {}, pmid = {34960455}, issn = {1424-8220}, mesh = {*Autonomous Vehicles ; *Cloud Computing ; Humans ; Software ; }, abstract = {Information technology is based on data management between various sources. Software projects, as varied as simple applications or as complex as self-driving cars, are heavily reliant on the amounts, and types, of data ingested by one or more interconnected systems. Data is not only consumed but is transformed or mutated, which requires copious amounts of computing resources. One of the most exciting areas of cyber-physical systems, autonomous vehicles, makes heavy use of deep learning and AI to mimic the highly complex actions of a human driver. Attempting to map human behavior (a large and abstract concept) requires large amounts of data, used by AIs to increase their knowledge and better attempt to solve complex problems. This paper outlines a full-fledged solution for managing resources in a multi-cloud environment. The purpose of this API is to accommodate ever-increasing resource requirements by leveraging the multi-cloud and using commercially available tools to scale resources and make systems more resilient while remaining as cloud agnostic as possible.
To that effect, the work herein will consist of an architectural breakdown of the resource management API, a low-level description of the implementation and an experiment aimed at proving the feasibility and applicability of the systems described.}, } @article {pmid34960384, year = {2021}, author = {Hameed, SS and Selamat, A and Abdul Latiff, L and Razak, SA and Krejcar, O and Fujita, H and Ahmad Sharif, MN and Omatu, S}, title = {A Hybrid Lightweight System for Early Attack Detection in the IoMT Fog.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {24}, pages = {}, pmid = {34960384}, issn = {1424-8220}, support = {FRGS/1/2018/ICT04/UTM/01/1//Ministry of Higher Education/ ; Vot 4L876//Ministry of Higher Education/ ; }, mesh = {Bayes Theorem ; Big Data ; Early Diagnosis ; *Internet of Things ; }, abstract = {Cyber-attack detection via on-gadget embedded models and cloud systems is widely used for the Internet of Medical Things (IoMT). The former has a limited computation ability, whereas the latter has a long detection time. Fog-based attack detection is alternatively used to overcome these problems. However, the current fog-based systems cannot handle the IoMT's ever-increasing big data. Moreover, they are not lightweight and are designed for network attack detection only. In this work, a hybrid (for host and network) lightweight system is proposed for early attack detection in the IoMT fog. In an adaptive online setting, six different incremental classifiers were implemented, namely a novel Weighted Hoeffding Tree Ensemble (WHTE), Incremental K-Nearest Neighbors (IKNN), Incremental Naïve Bayes (INB), Hoeffding Tree Majority Class (HTMC), Hoeffding Tree Naïve Bayes (HTNB), and Hoeffding Tree Naïve Bayes Adaptive (HTNBA). The system was benchmarked with seven heterogeneous sensors and NetFlow data infected with nine types of recent attacks. The results showed that the proposed system worked well on the lightweight fog devices with ~100% accuracy, a low detection time, and a low memory usage of less than 6 MiB. The single-criteria comparative analysis showed that the WHTE ensemble was more accurate and less sensitive to concept drift.}, } @article {pmid34960320, year = {2021}, author = {Alwakeel, AM}, title = {An Overview of Fog Computing and Edge Computing Security and Privacy Issues.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {24}, pages = {}, pmid = {34960320}, issn = {1424-8220}, mesh = {*Cloud Computing ; *Privacy ; }, abstract = {With the advancement of technologies such as 5G networks and IoT, the use of cloud computing technologies has become essential. Cloud computing allows intensive data processing and warehousing solutions. Fog computing and edge computing are two newer cloud technologies that inherit parts of the traditional cloud computing paradigm; they aim to simplify some of the complexity of cloud computing and to leverage the computing capabilities within the local network in order to perform computation tasks rather than carrying them to the cloud, which makes these technologies fit the properties of IoT systems. However, using such technologies introduces several new security and privacy challenges that could be a huge obstacle to implementing them. In this paper, we survey some of the main security and privacy challenges that face fog and edge computing, illustrating how these security issues could affect the work and implementation of edge and fog computing.
Moreover, we present several countermeasures to mitigate the effect of these security issues.}, } @article {pmid34956357, year = {2021}, author = {Xie, P and Ma, E and Xu, Z}, title = {Cloud Computing Image Recognition System Assists the Construction of the Internet of Things Model of Administrative Management Event Parameters.}, journal = {Computational intelligence and neuroscience}, volume = {2021}, number = {}, pages = {8630256}, pmid = {34956357}, issn = {1687-5273}, mesh = {Algorithms ; *Cloud Computing ; *Internet of Things ; Software ; }, abstract = {In order to successfully apply the Internet of Things and cloud computing to the administrative management of spatial structures and realize the systematization, digitization, and intelligence of administrative management, this article draws on research experience in related fields and considers the data characteristics and computing tasks of administrative management. Taking the whole cycle of transmission, storage, postprocessing, and visualization as the main line of research, a cloud computing-based IoT system for spatial structure administrative management is constructed. First, by summarizing the application status of the Internet of Things, the general IoT system is organized into three levels; combined with the specific work of spatial structure administrative management, the overall framework of the spatial structure administrative management IoT system is proposed and its functional sublayers are designed. Second, drawing on research into traditional image recognition systems and an investigation of their practical application, and in order to meet users' requirements for the computing efficiency and recognition accuracy of image recognition systems, an image recognition system for the cloud computing environment is proposed. It includes a fuzzy evaluation algorithm for health grades based on hierarchy analysis, optimized for the index and scoring systems, and a calculation method that uses time series to identify regular outliers. An optical image pixel-level fusion method and an infrared and visible image fusion method based on complementary information are proposed, and image fusion software is developed. Finally, in order to enable the application layer to use cluster resources to process massive monitoring data containing redundancy, heterogeneity, anomalies, and many other defects efficiently and intelligently, the calculation process of each specific preprocessing and postprocessing task in the application layer is demonstrated one by one. The analysis concludes that vertical storage of data blocks according to different sensor channels is the optimal strategy.}, } @article {pmid34951597, year = {2021}, author = {Yang, JS and Cuomo, RE and Purushothaman, V and Nali, M and Shah, N and Bardier, C and Obradovich, N and Mackey, T}, title = {Campus Smoking Policies and Smoking-Related Twitter Posts Originating From California Public Universities: Retrospective Study.}, journal = {JMIR formative research}, volume = {5}, number = {12}, pages = {e33331}, pmid = {34951597}, issn = {2561-326X}, abstract = {BACKGROUND: The number of colleges and universities with smoke- or tobacco-free campus policies has been increasing.
The effects of campus smoking policies on overall sentiment, particularly among young adult populations, are more difficult to assess owing to the changing tobacco and e-cigarette product landscape and differential attitudes toward policy implementation and enforcement.

OBJECTIVE: The goal of the study was to retrospectively assess the campus climate toward tobacco use by comparing tweets from California universities with smoke- or tobacco-free campus policies and those without.

METHODS: Geolocated Twitter posts from 2015 were collected using the Twitter public application programming interface in combination with cloud computing services on Amazon Web Services. Posts were filtered for tobacco products and behavior-related keywords. A total of 42,877,339 posts were collected from 2015, with 2837 originating from a University of California or California State University system campus, and 758 of these manually verified as being about smoking. Chi-square tests were conducted to determine if there were significant differences in tweet user sentiments between campuses that were smoke- or tobacco-free (all University of California campuses and California State University, Fullerton) and those that were not. A separate content analysis of tweets included in chi-square tests was conducted to identify major themes by campus smoking policy status.
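A hedged sketch of the chi-square comparison just described follows; the 2x2 sentiment-by-policy counts are invented placeholders, not the study's data.

from scipy.stats import chi2_contingency

# Rows: campus policy group; columns: positive vs non-positive tweets (assumed counts).
table = [
    [230, 70],   # campuses without a smoke-/tobacco-free policy
    [305, 153],  # campuses with such a policy
]

chi2, p, dof, expected = chi2_contingency(table)
print(f"chi2={chi2:.2f}, dof={dof}, p={p:.3f}")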

RESULTS: The percentage of positive sentiment tweets toward tobacco use was higher on campuses without a smoke- or tobacco-free campus policy than on campuses with a smoke- or tobacco-free campus policy (76.7% vs 66.4%, P=.03). Higher positive sentiment on campuses without a smoke- or tobacco-free campus policy may have been driven by general comments about one's own smoking behavior and comments about smoking as a general behavior. Positive sentiment tweets originating from campuses without a smoke- or tobacco-free policy had greater variation in tweet type, which may have also contributed to differences in sentiment among universities.

CONCLUSIONS: Our study introduces preliminary data suggesting that campus smoke- and tobacco-free policies are associated with a reduction in positive sentiment toward smoking. However, continued expressions and intentions to smoke and reports of one's own smoking among Twitter users suggest a need for more research to better understand the dynamics between implementation of smoke- and tobacco-free policies and resulting tobacco behavioral sentiment.}, } @article {pmid34948885, year = {2021}, author = {Garcés-Jiménez, A and Calderón-Gómez, H and Gómez-Pulido, JM and Gómez-Pulido, JA and Vargas-Lombardo, M and Castillo-Sequera, JL and Aguirre, MP and Sanz-Moreno, J and Polo-Luque, ML and Rodríguez-Puyol, D}, title = {Medical Prognosis of Infectious Diseases in Nursing Homes by Applying Machine Learning on Clinical Data Collected in Cloud Microservices.}, journal = {International journal of environmental research and public health}, volume = {18}, number = {24}, pages = {}, pmid = {34948885}, issn = {1660-4601}, mesh = {Aged ; *Biomedical Research ; Cloud Computing ; *Communicable Diseases/diagnosis/epidemiology ; Humans ; Machine Learning ; Nursing Homes ; }, abstract = {BACKGROUND: treating infectious diseases in elderly individuals is difficult; patient referral to emergency services often occurs, since the elderly tend to arrive at consultations with advanced, serious symptoms.

AIM: it was hypothesized that anticipating an infectious disease diagnosis by a few days could significantly improve a patient's well-being and reduce the burden on emergency health system services.

METHODS: vital signs from residents were taken daily and transferred to a database in the cloud. Classifiers were used to recognize patterns in the spatial domain process of the collected data. Doctors reported their diagnoses when any disease presented. A flexible microservice architecture provided access and functionality to the system.
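As a loose illustration of this step, the sketch below trains a pattern classifier on synthetic daily vital-sign records; the feature set, the toy label rule, and the random-forest choice are assumptions for illustration, not the study's actual classifiers.

import numpy as np
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import cross_val_score

rng = np.random.default_rng(0)
n = 300
# Assumed daily features: temperature (deg C), heart rate, systolic BP, SpO2.
X = np.column_stack([
    rng.normal(36.8, 0.5, n),
    rng.normal(75, 10, n),
    rng.normal(125, 15, n),
    rng.normal(96, 2, n),
])
y = (X[:, 0] > 37.2).astype(int)  # toy label: fever marks a reported infection

clf = RandomForestClassifier(n_estimators=100, random_state=0)
print(cross_val_score(clf, X, y, cv=5).mean())  # cross-validated accuracy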

RESULTS: combining two different domains, health and technology, is not easy, but the results are encouraging. The classifiers reported good results; the system has been well accepted by medical personnel and is proving to be cost-effective and a good solution to service disadvantaged areas. In this context, this research found the importance of certain clinical variables in the identification of infectious diseases.

CONCLUSIONS: this work explores how to apply mobile communications, cloud services, and machine learning technology in order to provide efficient tools for medical staff in nursing homes. The scalable architecture can be extended to big data applications that may extract valuable knowledge patterns for medical research.}, } @article {pmid34941535, year = {2022}, author = {Qiu, J and Yan, X and Wang, W and Wei, W and Fang, K}, title = {Skeleton-Based Abnormal Behavior Detection Using Secure Partitioned Convolutional Neural Network Model.}, journal = {IEEE journal of biomedical and health informatics}, volume = {26}, number = {12}, pages = {5829-5840}, doi = {10.1109/JBHI.2021.3137334}, pmid = {34941535}, issn = {2168-2208}, mesh = {Humans ; *Neural Networks, Computer ; *Privacy ; Computer Security ; Skeleton ; }, abstract = {Abnormal behavior detection is vital for evaluating the daily-life health status of patients with cognitive impairment. Previous studies on abnormal behavior detection indicate that convolutional neural network (CNN)-based computer vision offers high robustness and accuracy for detection. However, executing a CNN model on the cloud can incur a privacy disclosure problem during data transmission, and the high computation overhead makes it difficult to execute the model on edge-end IoT devices with good real-time performance. In this paper, we realize skeleton-based abnormal behavior detection and propose a secure partitioned CNN model (SP-CNN) to extract human skeleton keypoints and achieve safe collaborative computing by deploying different CNN model layers on the cloud and the IoT device. Because the data output from the IoT device have already been processed by several CNN layers instead of the sensitive video data being transmitted, the risk of privacy disclosure is objectively reduced. Moreover, we also design an encryption method based on channel state information (CSI) to guarantee the security of the sensitive data. Finally, we apply SP-CNN to abnormal behavior detection to evaluate its effectiveness. The experimental results illustrate that the efficiency of abnormal behavior detection based on SP-CNN is at least 33.2% higher than that of state-of-the-art methods, and its detection accuracy reaches 97.54%.}, } @article {pmid34939144, year = {2021}, author = {Lu, ZX and Qian, P and Bi, D and Ye, ZW and He, X and Zhao, YH and Su, L and Li, SL and Zhu, ZL}, title = {Application of AI and IoT in Clinical Medicine: Summary and Challenges.}, journal = {Current medical science}, volume = {41}, number = {6}, pages = {1134-1150}, pmid = {34939144}, issn = {2523-899X}, mesh = {Algorithms ; Artificial Intelligence/*trends ; *Big Data ; Clinical Medicine/*trends ; Cloud Computing/*trends ; Humans ; Internet of Things/*trends ; Machine Learning ; }, abstract = {The application of artificial intelligence (AI) technology in the medical field has experienced a long history of development. In turn, some long-standing points and challenges in the medical field have also prompted diverse research teams to continue to explore AI in depth. With the development of advanced technologies such as the Internet of Things (IoT), cloud computing, big data, and 5G mobile networks, AI technology has been more widely adopted in the medical field. In addition, the in-depth integration of AI and IoT technology enables the gradual improvement of medical diagnosis and treatment capabilities so as to provide services to the public in a more effective way.
In this work, we examine the technical basis of IoT, cloud computing, big data analysis and machine learning involved in clinical medicine, combined with concepts of specific algorithms such as activity recognition, behavior recognition, anomaly detection, assistant decision-making system, to describe the scenario-based applications of remote diagnosis and treatment collaboration, neonatal intensive care unit, cardiology intensive care unit, emergency first aid, venous thromboembolism, monitoring nursing, image-assisted diagnosis, etc. We also systematically summarize the application of AI and IoT in clinical medicine, analyze the main challenges thereof, and comment on the trends and future developments in this field.}, } @article {pmid34938329, year = {2021}, author = {Siam, AI and Almaiah, MA and Al-Zahrani, A and Elazm, AA and El Banby, GM and El-Shafai, W and El-Samie, FEA and El-Bahnasawy, NA}, title = {Secure Health Monitoring Communication Systems Based on IoT and Cloud Computing for Medical Emergency Applications.}, journal = {Computational intelligence and neuroscience}, volume = {2021}, number = {}, pages = {8016525}, pmid = {34938329}, issn = {1687-5273}, mesh = {*Cloud Computing ; Communication ; Humans ; *Internet of Things ; Oxygen Saturation ; Reproducibility of Results ; }, abstract = {Smart health surveillance technology has attracted wide attention between patients and professionals or specialists to provide early detection of critical abnormal situations without the need to be in direct contact with the patient. This paper presents a secure smart monitoring portable multivital signal system based on Internet-of-Things (IoT) technology. The implemented system is designed to measure the key health parameters: heart rate (HR), blood oxygen saturation (SpO2), and body temperature, simultaneously. The captured physiological signals are processed and encrypted using the Advanced Encryption Standard (AES) algorithm before sending them to the cloud. An ESP8266 integrated unit is used for processing, encryption, and providing connectivity to the cloud over Wi-Fi. On the other side, trusted medical organization servers receive and decrypt the measurements and display the values on the monitoring dashboard for the authorized specialists. The proposed system measurements are compared with a number of commercial medical devices. Results demonstrate that the measurements of the proposed system are within the 95% confidence interval. Moreover, Root Mean Squared Error (RMSE), Mean Absolute Error (MAE), and Mean Relative Error (MRE) for the proposed system are calculated as 1.44, 1.12, and 0.012, respectively, for HR, 1.13, 0.92, and 0.009, respectively, for SpO2, and 0.13, 0.11, and 0.003, respectively, for body temperature. These results demonstrate the high accuracy and reliability of the proposed system.}, } @article {pmid34934784, year = {2021}, author = {Abdul Hadi, M and Schmid, J and Trabesinger, S and Brillinger, M}, title = {High-frequency machine datasets captured via Edge Device from Spinner U5-630 milling machine.}, journal = {Data in brief}, volume = {39}, number = {}, pages = {107670}, pmid = {34934784}, issn = {2352-3409}, abstract = {The high-frequency (HF) machine data is retrieved from the Spinner U5-630 milling machine via an Edge Device. Unlike cloud computing, an Edge Device refers to distributed data processing of devices in proximity that generate data, which can thereby be used for analysis [1,2]. 
These data have a sampling period of 2 ms and hence a sampling frequency of 500 Hz. The HF machine data come from experiments performed in 2 parts: part 1 has 12 .json data files and part 2 has 11 .json files. In total, there are 23 files of HF machine data from 23 experiments. The HF machine data have vast potential for analysis, as they contain all the information from the machine during the machining process. One part of the information was used in our case to calculate the energy consumption of the machine. Similarly, the data can be used for retrieving information on torque, commanded and actual speed, NC code, current, etc.}, } @article {pmid34919694, year = {2022}, author = {Kahn, MG and Mui, JY and Ames, MJ and Yamsani, AK and Pozdeyev, N and Rafaels, N and Brooks, IM}, title = {Migrating a research data warehouse to a public cloud: challenges and opportunities.}, journal = {Journal of the American Medical Informatics Association : JAMIA}, volume = {29}, number = {4}, pages = {592-600}, pmid = {34919694}, issn = {1527-974X}, support = {UL1 TR002535/TR/NCATS NIH HHS/United States ; }, mesh = {*Cloud Computing ; *Data Warehousing ; }, abstract = {OBJECTIVE: Clinical research data warehouses (RDWs) linked to genomic pipelines and open data archives are being created to support innovative, complex data-driven discoveries. The computing and storage needs of these research environments may quickly exceed the capacity of on-premises systems. New RDWs are migrating to cloud platforms for the scalability and flexibility needed to meet these challenges. We describe our experience in migrating a multi-institutional RDW to a public cloud.

MATERIALS AND METHODS: This study is descriptive. Primary materials included internal and public presentations before and after the transition, analysis documents, and actual billing records. Findings were aggregated into topical categories.

RESULTS: Eight categories of migration issues were identified. Unanticipated challenges included legacy system limitations; network, computing, and storage architectures that realize performance and cost benefits in the face of hyper-innovation; complex security reviews and approvals; and limited cloud consulting expertise.

DISCUSSION: Cloud architectures enable previously unavailable capabilities, but numerous pitfalls can impede realizing the full benefits of a cloud environment. Rapid changes in cloud capabilities can quickly obsolete existing architectures and associated institutional policies. Touchpoints with on-premise networks and systems can add unforeseen complexity. Governance, resource management, and cost oversight are critical to allow rapid innovation while minimizing wasted resources and unnecessary costs.

CONCLUSIONS: Migrating our RDW to the cloud has enabled capabilities and innovations that would not have been possible with an on-premises environment. Notwithstanding the challenges of managing cloud resources, the resulting RDW capabilities have been highly positive to our institution, research community, and partners.}, } @article {pmid34917585, year = {2021}, author = {Pandya, S and Sur, A and Solke, N}, title = {COVIDSAVIOR: A Novel Sensor-Fusion and Deep Learning Based Framework for Virus Outbreaks.}, journal = {Frontiers in public health}, volume = {9}, number = {}, pages = {797808}, pmid = {34917585}, issn = {2296-2565}, mesh = {Algorithms ; *COVID-19 ; *Deep Learning ; Disease Outbreaks/prevention & control ; Humans ; Masks ; }, abstract = {The presented deep learning and sensor-fusion based assistive technology (Smart Facemask and Thermal Scanning Kiosk) protects individuals using automatic face-mask detection and automatic thermal scanning of current body temperature. Furthermore, the presented system raises a variety of notifications, such as an alarm, if an individual is not wearing a mask or has a body temperature beyond the standard threshold of 98.6°F (37°C). Design/methodology/approach: The presented deep learning and sensor-fusion-based approach can detect whether an individual is wearing a mask and provide appropriate notification to security personnel by raising an alarm. Moreover, the smart tunnel is also equipped with a thermal sensing unit embedded with a camera, which can detect the real-time body temperature of an individual with respect to the body temperature limits prescribed by WHO reports. Findings: The investigation results validate the performance of the presented smart face-mask and thermal scanning mechanism. The presented system can also detect whether an outsider entering the building is wearing a mask and alert the security control room by raising appropriate alarms. Furthermore, the presented smart epidemic tunnel is embedded with an intelligent algorithm that can perform real-time thermal scanning of an individual and store essential information in a cloud platform, such as Google Firebase. Thus, the proposed system favors society by saving time and helps in lowering the spread of coronavirus.}, } @article {pmid34914924, year = {2022}, author = {Iregbu, K and Dramowski, A and Milton, R and Nsutebu, E and Howie, SRC and Chakraborty, M and Lavoie, PM and Costelloe, CE and Ghazal, P}, title = {Global health systems' data science approach for precision diagnosis of sepsis in early life.}, journal = {The Lancet. Infectious diseases}, volume = {22}, number = {5}, pages = {e143-e152}, doi = {10.1016/S1473-3099(21)00645-9}, pmid = {34914924}, issn = {1474-4457}, support = {G0701289/MRC_/Medical Research Council/United Kingdom ; K43 TW010682/TW/FIC NIH HHS/United States ; }, mesh = {Artificial Intelligence ; Child ; *Data Science ; Developing Countries ; Global Health ; Humans ; Infant, Newborn ; *Sepsis/diagnosis ; }, abstract = {Neonates and children in low-income and middle-income countries (LMICs) contribute to the highest number of sepsis-associated deaths globally. Interventions to prevent sepsis mortality are hampered by a lack of comprehensive epidemiological data and pathophysiological understanding of biological pathways. In this review, we discuss the challenges faced by LMICs in diagnosing sepsis in these age groups.
We highlight a role for multi-omics and health care data to improve diagnostic accuracy of clinical algorithms, arguing that health-care systems urgently need precision medicine to avoid the pitfalls of missed diagnoses, misdiagnoses, and overdiagnoses, and associated antimicrobial resistance. We discuss ethical, regulatory, and systemic barriers related to the collection and use of big data in LMICs. Technologies such as cloud computing, artificial intelligence, and medical tricorders might help, but they require collaboration with local communities. Co-partnering (joint equal development of technology between producer and end-users) could facilitate integration of these technologies as part of future care-delivery systems, offering a chance to transform the global management and prevention of sepsis for neonates and children.}, } @article {pmid34914923, year = {2022}, author = {Keddy, KH and Saha, S and Kariuki, S and Kalule, JB and Qamar, FN and Haq, Z and Okeke, IN}, title = {Using big data and mobile health to manage diarrhoeal disease in children in low-income and middle-income countries: societal barriers and ethical implications.}, journal = {The Lancet. Infectious diseases}, volume = {22}, number = {5}, pages = {e130-e142}, doi = {10.1016/S1473-3099(21)00585-5}, pmid = {34914923}, issn = {1474-4457}, support = {MR/L00464X/1/MRC_/Medical Research Council/United Kingdom ; R01 AI099525/AI/NIAID NIH HHS/United States ; 215675/Z/19/Z/WT_/Wellcome Trust/United Kingdom ; }, mesh = {Big Data ; *Developing Countries ; Diarrhea/prevention & control/therapy ; Humans ; Poverty ; *Telemedicine ; }, abstract = {Diarrhoea is an important cause of morbidity and mortality in children from low-income and middle-income countries (LMICs), despite advances in the management of this condition. Understanding of the causes of diarrhoea in children in LMICs has advanced owing to large multinational studies and big data analytics computing the disease burden, identifying the important variables that have contributed to reducing this burden. The advent of the mobile phone has further enabled the management of childhood diarrhoea by providing both clinical support to health-care workers (such as diagnosis and management) and communicating preventive measures to carers (such as breastfeeding and vaccination reminders) in some settings. There are still challenges in addressing the burden of diarrhoeal diseases, such as incomplete patient information, underrepresented geographical areas, concerns about patient confidentiality, unequal partnerships between study investigators, and the reactive approach to outbreaks. A transparent approach to promote the inclusion of researchers in LMICs could address partnership imbalances. 
A big data umbrella encompassing cloud-based centralised databases to analyse interlinked human, animal, agricultural, social, and climate data would provide an informative solution to the development of appropriate management protocols in LMICs.}, } @article {pmid34912536, year = {2021}, author = {Lin, B and Huang, W}, title = {A Study of Cloud-Based Remote Clinical Care Technology.}, journal = {Journal of healthcare engineering}, volume = {2021}, number = {}, pages = {8024091}, pmid = {34912536}, issn = {2040-2309}, mesh = {*Cloud Computing ; Delivery of Health Care ; Humans ; *Software ; Surveys and Questionnaires ; Technology ; }, abstract = {This paper uses cloud computing to design and build remote clinical care technology. The study refines the evaluation approach for the relevant elements and builds an evaluation prototype for the strategy, uses service design theory to improve the design of the service part of the assistive system, summarizes the list of requirements based on system design and service design, and designs a service design prototype. Through design practice, the detailed design of the software interaction interface and the auxiliary product of the care assistance system based on the prototype are investigated. Based on the user perspective, the strategy of meeting user expectations and improving user information literacy is proposed; based on the social network perspective, the strategy of establishing a long-term mechanism for smart medical operation and improving the information interaction network environment is proposed; and based on the system service perspective, the strategy of optimizing the system function design and innovating the service model is proposed. Compared with the traditional written patient handover, the application of MNIS under cloud computing can significantly shorten the handover time of surgical patients and improve the standardized rate of surgical safety verification execution and the qualified rate of nursing documents, while the rate of standardized application of prophylactic antibiotics is also significantly higher than that of the control group. The questionnaire survey of nursing staff in the operating room showed that clinical nursing staff were generally satisfied with the clinical application of MNIS under cloud computing, with an average satisfaction score of 64.5 ± 11.3 and an average score of 3.58 ± 0.54 for each item. Among them, pre-application training of MNIS, departmental support for MNIS, and its ease of verification in surgical patients were the three main factors favoring the clinical application of MNIS in the operating room with cloud computing, while barriers to wireless network connectivity, inconvenient PDA input, and small screen size were the three main drawbacks affecting its application.
The clinical evaluation index system established for MNIS in the operating room is innovative and comprehensive: it includes not only clinical care indicators but also general hardware and software indicators, and it can effectively reflect the practical capability of the mobile clinical terminal and the user experience.}, } @article {pmid34910757, year = {2021}, author = {Byrne, M and O'Malley, L and Glenny, AM and Pretty, I and Tickle, M}, title = {Assessing the reliability of automatic sentiment analysis tools on rating the sentiment of reviews of NHS dental practices in England.}, journal = {PloS one}, volume = {16}, number = {12}, pages = {e0259797}, pmid = {34910757}, issn = {1932-6203}, mesh = {*Artificial Intelligence ; Automation ; Dental Care/*standards ; Humans ; Internet ; National Health Programs ; United Kingdom ; }, abstract = {BACKGROUND: Online reviews may act as a rich source of data to assess the quality of dental practices. Assessing the content and sentiment of reviews on a large scale is time consuming and expensive. Automation of the process of assigning sentiment to big data samples of reviews may allow for reviews to be used as Patient Reported Experience Measures for primary care dentistry.

AIM: To assess the reliability of three different online sentiment analysis tools (Amazon Comprehend DetectSentiment API (ACDAPI), Google and Monkeylearn) at assessing the sentiment of reviews of dental practices working on National Health Service contracts in the United Kingdom.

METHODS: A Python 3 script was used to mine 15800 reviews from 4803 unique dental practices on the NHS.uk websites between April 2018 and March 2019. A random sample of 270 reviews was rated by the three sentiment analysis tools. These reviews were rated by 3 blinded independent human reviewers and a pooled sentiment score was assigned. Kappa statistics and polychoric evaluation were used to assess the level of agreement. Disagreements between the automated and human reviewers were qualitatively assessed.
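The kappa agreement computation described here can be sketched as follows; the label sequences are invented stand-ins for the pooled human ratings and one tool's output.

from sklearn.metrics import cohen_kappa_score

# Assumed example labels: pooled human sentiment vs one automated tool's output.
human = ["pos", "neg", "pos", "neu", "pos", "neg", "neu", "pos"]
tool  = ["pos", "neg", "pos", "pos", "pos", "neg", "neu", "neg"]

print(f"kappa = {cohen_kappa_score(human, tool):.3f}")  # chance-corrected agreement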

RESULTS: There was good agreement between the sentiment assigned to reviews by the human reviewers and by ACDAPI (k = 0.660). The Google (k = 0.706) and Monkeylearn (k = 0.728) tools showed slightly better agreement at the expense of usability on a massive dataset. There were 33 disagreements in rating between ACDAPI and the human reviewers, of which n = 16 were due to syntax errors, n = 10 were due to misappropriation of the strength of conflicting emotions and n = 7 were due to a lack of overtly emotive language in the text.

CONCLUSIONS: There is good agreement between the sentiment of an online review assigned by a group of humans and by cloud-based sentiment analysis. This may allow the use of automated sentiment analysis for quality assessment of dental service provision in the NHS.}, } @article {pmid34906327, year = {2021}, author = {Halder, A and Verma, A and Biswas, D and Srivastava, S}, title = {Recent advances in mass-spectrometry based proteomics software, tools and databases.}, journal = {Drug discovery today. Technologies}, volume = {39}, number = {}, pages = {69-79}, doi = {10.1016/j.ddtec.2021.06.007}, pmid = {34906327}, issn = {1740-6749}, mesh = {Algorithms ; Databases, Factual ; Databases, Protein ; Mass Spectrometry ; *Proteomics ; *Software ; }, abstract = {The field of proteomics immensely depends on data generation and data analysis, which are thoroughly supported by software and databases. There has been massive advancement in mass spectrometry-based proteomics over the last 10 years, which has compelled the scientific community to upgrade or develop algorithms, tools, and repository databases in the field. Several standalone software tools and comprehensive databases have aided the establishment of integrated omics pipelines and meta-analysis workflows, contributing to the understanding of disease pathobiology, biomarker discovery, and the prediction of new therapeutic modalities. For shotgun proteomics, where Data Dependent Acquisition is performed, several user-friendly software packages have been developed that can analyse the pre-processed data to provide mechanistic insights into disease. Likewise, in Data Independent Acquisition, pipelines have emerged that can accomplish the tasks from building the spectral library to identifying therapeutic targets. Furthermore, in the age of big data analysis, machine learning and cloud computing are adding robustness, rapidity, and depth to proteomics data analysis. The current review discusses the recent advancements and development of software, tools, and databases in the field of mass-spectrometry based proteomics.}, } @article {pmid34906321, year = {2021}, author = {Frye, L and Bhat, S and Akinsanya, K and Abel, R}, title = {From computer-aided drug discovery to computer-driven drug discovery.}, journal = {Drug discovery today. Technologies}, volume = {39}, number = {}, pages = {111-117}, doi = {10.1016/j.ddtec.2021.08.001}, pmid = {34906321}, issn = {1740-6749}, mesh = {*Artificial Intelligence ; Computer-Aided Design ; Computers ; Drug Design ; *Drug Discovery ; Machine Learning ; Proteins ; }, abstract = {Computational chemistry and structure-based design have traditionally been viewed as a subset of tools that could aid acceleration of the drug discovery process, but were not commonly regarded as a driving force in small molecule drug discovery. In the last decade however, there have been dramatic advances in the field, including (1) development of physics-based computational approaches to accurately predict a broad variety of endpoints from potency to solubility, (2) improvements in artificial intelligence and deep learning methods and (3) dramatic increases in computational power with the advent of GPUs and cloud computing, resulting in the ability to explore and accurately profile vast amounts of drug-like chemical space in silico.
There have also been simultaneous advancements in structural biology such as cryogenic electron microscopy (cryo-EM) and computational protein-structure prediction, allowing for access to many more high-resolution 3D structures of novel drug-receptor complexes. The convergence of these breakthroughs has positioned structurally-enabled computational methods to be a driving force behind the discovery of novel small molecule therapeutics. This review will give a broad overview of the synergies in recent advances in the fields of computational chemistry, machine learning and structural biology, in particular in the areas of hit identification, hit-to-lead, and lead optimization.}, } @article {pmid34902160, year = {2022}, author = {Rowe, SP and Pomper, MG}, title = {Molecular imaging in oncology: Current impact and future directions.}, journal = {CA: a cancer journal for clinicians}, volume = {72}, number = {4}, pages = {333-352}, pmid = {34902160}, issn = {1542-4863}, support = {R01 CA184228/CA/NCI NIH HHS/United States ; R01 CA134675/CA/NCI NIH HHS/United States ; P41 EB024495/EB/NIBIB NIH HHS/United States ; }, mesh = {Animals ; Humans ; Magnetic Resonance Imaging ; *Medical Oncology ; *Molecular Imaging/methods ; Positron-Emission Tomography ; }, abstract = {The authors define molecular imaging, according to the Society of Nuclear Medicine and Molecular Imaging, as the visualization, characterization, and measurement of biological processes at the molecular and cellular levels in humans and other living systems. Although practiced for many years clinically in nuclear medicine, expansion to other imaging modalities began roughly 25 years ago and has accelerated since. That acceleration derives from the continual appearance of new and highly relevant animal models of human disease, increasingly sensitive imaging devices, high-throughput methods to discover and optimize affinity agents to key cellular targets, new ways to manipulate genetic material, and expanded use of cloud computing. Greater interest by scientists in allied fields, such as chemistry, biomedical engineering, and immunology, as well as increased attention by the pharmaceutical industry, have likewise contributed to the boom in activity in recent years. Whereas researchers and clinicians have applied molecular imaging to a variety of physiologic processes and disease states, here, the authors focus on oncology, arguably where it has made its greatest impact. The main purpose of imaging in oncology is early detection to enable interception if not prevention of full-blown disease, such as the appearance of metastases. Because biochemical changes occur before changes in anatomy, molecular imaging-particularly when combined with liquid biopsy for screening purposes-promises especially early localization of disease for optimum management. 
Here, the authors introduce the ways and indications in which molecular imaging can be undertaken, the tools used and under development, and near-term challenges and opportunities in oncology.}, } @article {pmid34902120, year = {2022}, author = {Calabrese, B}, title = {Web and Cloud Computing to Analyze Microarray Data.}, journal = {Methods in molecular biology (Clifton, N.J.)}, volume = {2401}, number = {}, pages = {29-38}, pmid = {34902120}, issn = {1940-6029}, mesh = {*Cloud Computing ; Information Storage and Retrieval ; Internet ; Microarray Analysis ; *Software ; }, abstract = {Microarray technology is a high-throughput technique that can simultaneously measure hundreds of thousands of genes' expression levels. Web and cloud computing tools and databases for storage and analysis of microarray data are necessary for biologists to interpret massive data from experiments. This chapter presents the main databases and web and cloud computing tools for microarray data storage and analysis.}, } @article {pmid34902119, year = {2022}, author = {Marozzo, F and Belcastro, L}, title = {High-Performance Framework to Analyze Microarray Data.}, journal = {Methods in molecular biology (Clifton, N.J.)}, volume = {2401}, number = {}, pages = {13-27}, pmid = {34902119}, issn = {1940-6029}, mesh = {Algorithms ; *Computational Biology ; Computing Methodologies ; Genome ; Humans ; *Microarray Analysis ; Software ; }, abstract = {Pharmacogenomics is an important research field that studies the impact of genetic variation of patients on drug responses, looking for correlations between single nucleotide polymorphisms (SNPs) of patient genome and drug toxicity or efficacy. The large number of available samples and the high resolution of the instruments allow microarray platforms to produce huge amounts of SNP data. To analyze such data and find correlations in a reasonable time, high-performance computing solutions must be used. Cloud4SNP is a bioinformatics tool, based on Data Mining Cloud Framework (DMCF), for parallel preprocessing and statistical analysis of SNP pharmacogenomics microarray data. This work describes how Cloud4SNP has been extended to execute applications on Apache Spark, which provides faster execution time for iterative and batch processing. The experimental evaluation shows that Cloud4SNP is able to exploit the high-performance features of Apache Spark, obtaining faster execution times and a high level of scalability, with a global speedup that is very close to linear values.}, } @article {pmid34900023, year = {2021}, author = {Tiwari, A and Dhiman, V and Iesa, MAM and Alsarhan, H and Mehbodniya, A and Shabaz, M}, title = {Patient Behavioral Analysis with Smart Healthcare and IoT.}, journal = {Behavioural neurology}, volume = {2021}, number = {}, pages = {4028761}, pmid = {34900023}, issn = {1875-8584}, mesh = {Artificial Intelligence ; Delivery of Health Care ; Humans ; *Internet of Things ; }, abstract = {Patient behavioral analysis is the key factor for providing treatment to patients who may suffer from various difficulties, including neurological disease, head trauma, and mental disease. Analyzing the patient's behavior helps in determining the root cause of the disease. In traditional healthcare, patient behavioral analysis faced many challenges and was much more difficult. With the development of smart healthcare, patient behavior can be analyzed much more easily. Information technology plays a key role in understanding the concept of smart healthcare.
A new generation of information technologies, including IoT and cloud computing, is changing the traditional healthcare system in every way. Using the Internet of Things in healthcare institutions enhances effectiveness and makes care more personalized and convenient for patients. The article first discusses the technologies that have been used to support smart healthcare, and then discusses the existing problems with the smart healthcare system and how these problems can be solved. This study can provide essential information about the role of smart healthcare and IoT in monitoring the behavior of patients. Various biomarkers can be tracked properly with the help of these technologies, and the study provides useful information about the importance of smart health systems. Smart healthcare is conducted with the involvement of a proper architecture, treated here as an effective energy-efficient architecture. Artificial intelligence is used increasingly in healthcare to support diagnosis and other important aspects of care; it is also used to maintain patient engagement, which is likewise covered in this study. Major hardware components, such as CO and CO2 sensors, are also included in this technology.}, } @article {pmid34899960, year = {2022}, author = {ElAraby, ME and Elzeki, OM and Shams, MY and Mahmoud, A and Salem, H}, title = {A novel Gray-Scale spatial exploitation learning Net for COVID-19 by crawling Internet resources.}, journal = {Biomedical signal processing and control}, volume = {73}, number = {}, pages = {103441}, pmid = {34899960}, issn = {1746-8094}, abstract = {Today, the planet suffers from the active COVID-19 pandemic, which motivates scientists and researchers to detect and diagnose infected people. The chest X-ray (CXR) image is a common tool for detection. Although the CXR offers few informative details about COVID-19 patches, computer vision helps to overcome this through grayscale spatial exploitation analysis. In turn, it is highly recommended to acquire more CXR images to increase the capacity and ability to learn when mining the grayscale spatial information. In this paper, an efficient Gray-scale Spatial Exploitation Net (GSEN) is designed by employing web-page crawling across cloud computing environments. The motivations of this work are (i) utilizing a framework methodology for constructing a consistent dataset by web crawling, updating the dataset continuously per crawling iteration; (ii) designing a lightweight, fast-learning gray-scale spatial exploitation deep neural net with comparable accuracy and fine-tuned parameters; (iii) comprehensively evaluating the designed gray-scale spatial exploitation net on different datasets collected by web COVID-19 crawling versus the transfer learning of pre-trained nets. Different experiments have been performed for benchmarking both the proposed web crawling framework methodology and the designed gray-scale spatial exploitation net. In terms of the accuracy metric, the proposed net achieves 95.60% for two-class labels and 92.67% for three-class labels, compared with the most recent transfer learning approaches (Google-Net, VGG-19, Res-Net 50, and Alex-Net).
Furthermore, web crawling improves the achieved accuracy rates in proportion to the cardinality of the crawled CXR dataset.}, } @article {pmid34898797, year = {2022}, author = {Subramanian, M and Shanmuga Vadivel, K and Hatamleh, WA and Alnuaim, AA and Abdelhady, M and V E, S}, title = {The role of contemporary digital tools and technologies in COVID-19 crisis: An exploratory analysis.}, journal = {Expert systems}, volume = {39}, number = {6}, pages = {e12834}, pmid = {34898797}, issn = {1468-0394}, abstract = {Following the COVID-19 pandemic, there has been an increase in interest in using digital resources to contain pandemics. To avoid, detect, monitor, regulate, track, and manage diseases, predict outbreaks and conduct data analysis and decision-making processes, a variety of digital technologies are used, ranging from artificial intelligence (AI)-powered machine learning (ML) or deep learning (DL) focused applications to blockchain technology and big data analytics enabled by cloud computing and the internet of things (IoT). In this paper, we look at how emerging technologies such as the IoT and sensors, AI, ML, DL, blockchain, augmented reality, virtual reality, cloud computing, big data, robots and drones, intelligent mobile apps, and 5G are advancing health care and paving the way to combat the COVID-19 pandemic. The aim of this research is to look at possible technologies, processes, and tools for addressing COVID-19 issues such as pre-screening, early detection, monitoring infected/quarantined individuals, forecasting future infection rates, and more. We also look at the research possibilities that have arisen as a result of the use of emerging technology to handle the COVID-19 crisis.}, } @article {pmid34898269, year = {2021}, author = {Verdu, E and Nieto, YV and Saleem, N}, title = {Call for Special Issue Papers: Cloud Computing and Big Data for Cognitive IoT.}, journal = {Big data}, volume = {9}, number = {6}, pages = {413-414}, doi = {10.1089/big.2021.29048.cfp}, pmid = {34898269}, issn = {2167-647X}, } @article {pmid34897506, year = {2022}, author = {Waitman, LR and Song, X and Walpitage, DL and Connolly, DC and Patel, LP and Liu, M and Schroeder, MC and VanWormer, JJ and Mosa, AS and Anye, ET and Davis, AM}, title = {Enhancing PCORnet Clinical Research Network data completeness by integrating multistate insurance claims with electronic health records in a cloud environment aligned with CMS security and privacy requirements.}, journal = {Journal of the American Medical Informatics Association : JAMIA}, volume = {29}, number = {4}, pages = {660-670}, pmid = {34897506}, issn = {1527-974X}, support = {UL1TR002366/NH/NIH HHS/United States ; UL1 TR002366/TR/NCATS NIH HHS/United States ; RI-CRN-2020-003-IC/PCORI/Patient-Centered Outcomes Research Institute/United States ; }, mesh = {Aged ; Centers for Medicare and Medicaid Services, U.S. ; *Electronic Health Records ; Humans ; Medicare ; Obesity ; *Privacy ; United States ; }, abstract = {OBJECTIVE: The Greater Plains Collaborative (GPC) and other PCORnet Clinical Data Research Networks capture healthcare utilization within their health systems. Here, we describe a reusable environment (GPC Reusable Observable Unified Study Environment [GROUSE]) that integrates hospital and electronic health records (EHRs) data with state-wide Medicare and Medicaid claims and assess how claims and clinical data complement each other to identify obesity and related comorbidities in a patient sample.

MATERIALS AND METHODS: EHR, billing, and tumor registry data from 7 healthcare systems were integrated with Center for Medicare (2011-2016) and Medicaid (2011-2012) services insurance claims to create deidentified databases in Informatics for Integrating Biology & the Bedside and PCORnet Common Data Model formats. We describe technical details of how this federally compliant, cloud-based data environment was built. As a use case, trends in obesity rates for different age groups are reported, along with the relative contribution of claims and EHR data-to-data completeness and detecting common comorbidities.

RESULTS: GROUSE contained 73 billion observations from 24 million unique patients (12.9 million Medicare; 13.9 million Medicaid; 6.6 million GPC patients) with 1 674 134 patients crosswalked and 983 450 patients with body mass index (BMI) linked to claims. Diagnosis codes from EHR and claims sources underreport obesity by 2.56 times compared with body mass index measures. However, common comorbidities such as diabetes and sleep apnea diagnoses were more often available from claims diagnosis codes (1.6 and 1.4 times, respectively).
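A hedged sketch of the underreporting comparison reported above follows, contrasting BMI-measured obesity prevalence with diagnosis-code prevalence on a toy table; the DataFrame and its columns are assumed stand-ins, not the GROUSE schema.

import pandas as pd

# Toy patient table; bmi and has_obesity_dx are assumed column names.
patients = pd.DataFrame({
    "bmi":            [32.1, 27.4, 41.0, 30.5, 24.9, 35.2],
    "has_obesity_dx": [True, False, True, False, False, False],
})

by_bmi = (patients["bmi"] >= 30).mean()     # measured obesity: BMI >= 30
by_dx = patients["has_obesity_dx"].mean()   # coded obesity: diagnosis codes
print(f"BMI-based {by_bmi:.2f} vs code-based {by_dx:.2f} "
      f"(underreporting ratio {by_bmi / by_dx:.2f})")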

CONCLUSION: GROUSE provides a unified EHR-claims environment to address health system and federal privacy concerns, which enables investigators to generalize analyses across health systems integrated with multistate insurance claims.}, } @article {pmid34895958, year = {2022}, author = {Li, Y and Cianfrocco, MA}, title = {Cloud computing platforms to support cryo-EM structure determination.}, journal = {Trends in biochemical sciences}, volume = {47}, number = {2}, pages = {103-105}, doi = {10.1016/j.tibs.2021.11.005}, pmid = {34895958}, issn = {0968-0004}, mesh = {*Cloud Computing ; Cryoelectron Microscopy ; }, abstract = {Leveraging the power of single-particle cryo-electron microscopy (cryo-EM) requires robust and accessible computational infrastructure. Here, we summarize the cloud computing landscape and picture the outlook of a hybrid cryo-EM computing workflow, and make suggestions to the community to facilitate a future for cryo-EM that integrates into cloud computing infrastructure.}, } @article {pmid34891943, year = {2021}, author = {Zhou, Y and Qian, C and Guo, Y and Wang, Z and Wang, J and Qu, B and Guo, D and You, Y and Qu, X}, title = {XCloud-pFISTA: A Medical Intelligence Cloud for Accelerated MRI.}, journal = {Annual International Conference of the IEEE Engineering in Medicine and Biology Society. IEEE Engineering in Medicine and Biology Society. Annual International Conference}, volume = {2021}, number = {}, pages = {3289-3292}, doi = {10.1109/EMBC46164.2021.9630813}, pmid = {34891943}, issn = {2694-0604}, mesh = {Algorithms ; *Artificial Intelligence ; *Image Processing, Computer-Assisted ; Intelligence ; Magnetic Resonance Imaging ; }, abstract = {Machine learning and artificial intelligence have shown remarkable performance in accelerated magnetic resonance imaging (MRI). Cloud computing technologies have great advantages in building an easily accessible platform to deploy advanced algorithms. In this work, we develop an open-access, easy-to-use and high-performance medical intelligence cloud computing platform (XCloud-pFISTA) to reconstruct MRI images from undersampled k-space data. Two state-of-the-art approaches of the Projected Fast Iterative Soft-Thresholding Algorithm (pFISTA) family have been successfully implemented on the cloud. This work can be considered as a good example of cloud-based medical image reconstruction and may benefit the future development of integrated reconstruction and online diagnosis systems.}, } @article {pmid34884122, year = {2021}, author = {Kua, J and Loke, SW and Arora, C and Fernando, N and Ranaweera, C}, title = {Internet of Things in Space: A Review of Opportunities and Challenges from Satellite-Aided Computing to Digitally-Enhanced Space Living.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {23}, pages = {}, pmid = {34884122}, issn = {1424-8220}, mesh = {Artificial Intelligence ; Cloud Computing ; *Internet of Things ; Machine Learning ; Technology ; }, abstract = {Recent scientific and technological advancements driven by the Internet of Things (IoT), Machine Learning (ML) and Artificial Intelligence (AI), distributed computing and data communication technologies have opened up a vast range of opportunities in many scientific fields, spanning from fast, reliable and efficient data communication to large-scale cloud/edge computing and intelligent big data analytics. Technological innovations and developments in these areas have also enabled many opportunities in the space industry.
The successful Mars landing of NASA's Perseverance rover on 18 February 2021 represents another giant leap for humankind in space exploration. Emerging research and developments of connectivity and computing technologies in IoT for space/non-terrestrial environments are expected to yield significant benefits in the near future. This survey paper presents a broad overview of the area and provides a look-ahead of the opportunities made possible by IoT and space-based technologies. We first survey the current developments of IoT and space industry, and identify key challenges and opportunities in these areas. We then review the state-of-the-art and discuss future opportunities for IoT developments, deployment and integration to support future endeavors in space exploration.}, } @article {pmid34884048, year = {2021}, author = {Sodhro, AH and Zahid, N}, title = {AI-Enabled Framework for Fog Computing Driven E-Healthcare Applications.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {23}, pages = {}, pmid = {34884048}, issn = {1424-8220}, mesh = {Aged ; Artificial Intelligence ; Delivery of Health Care ; Humans ; *Internet of Things ; Reproducibility of Results ; *Telemedicine ; }, abstract = {Artificial Intelligence (AI) is the revolutionary paradigm to empower sixth generation (6G) edge computing based e-healthcare for everyone. Thus, this research aims to promote an AI-based cost-effective and efficient healthcare application. The cyber physical system (CPS) is a key player in the internet world where humans and their personal devices such as cell phones, laptops, wearables, etc., facilitate the healthcare environment. The data extracting, examining and monitoring strategies from sensors and actuators in the entire medical landscape are facilitated by cloud-enabled technologies for absorbing and accepting the entire emerging wave of revolution. The efficient and accurate examination of voluminous data from the sensor devices poses restrictions in terms of bandwidth, delay and energy. Due to the heterogeneous nature of the Internet of Medical Things (IoMT), the driven healthcare system must be smart, interoperable, convergent, and reliable to provide pervasive and cost-effective healthcare platforms. Unfortunately, because of higher power consumption and a lower packet delivery rate, achieving interoperable, convergent, and reliable transmission is challenging in connected healthcare. In such a scenario, this paper makes four major contributions. The first contribution is the development of a single chip wearable electrocardiogram (ECG) with the support of an analog front end (AFE) chip model (i.e., ADS1292R) for gathering the ECG data to examine the health status of elderly or chronic patients with the IoT-based cyber physical system (CPS). The second proposes a fuzzy-based sustainable, interoperable, and reliable algorithm (FSIRA), which is an intelligent and self-adaptive decision-making approach to prioritize emergency and critical patients in association with the selected parameters for improving healthcare quality at reasonable costs. The third is the proposal of a specific cloud-based architecture for mobile and connected healthcare. The fourth is the identification of the right balance between reliability, packet loss ratio, convergence, latency, interoperability, and throughput to support an adaptive IoMT driven connected healthcare.
It is examined and observed that our proposed approaches outperform the conventional techniques by providing high reliability, high convergence, interoperability, and a better foundation to analyze and interpret the accuracy in systems from a medical health aspect. As for the IoMT, an enabled healthcare cloud is the key ingredient on which to focus, as it also faces the big hurdle of less bandwidth, more delay and energy drain. Thus, we propose the mathematical trade-offs between bandwidth, interoperability, reliability, delay, and energy dissipation for IoMT-oriented smart healthcare over a 6G platform.}, } @article {pmid34883979, year = {2021}, author = {Lazazzera, R and Laguna, P and Gil, E and Carrault, G}, title = {Proposal for a Home Sleep Monitoring Platform Employing a Smart Glove.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {23}, pages = {}, pmid = {34883979}, issn = {1424-8220}, mesh = {Humans ; *Oxygen Saturation ; Photoplethysmography ; Polysomnography ; Sleep ; *Sleep Apnea Syndromes/diagnosis ; }, abstract = {The present paper proposes the design of a sleep monitoring platform. It consists of an entire sleep monitoring system based on a smart glove sensor called UpNEA worn during the night for signals acquisition, a mobile application, and a remote server called AeneA for cloud computing. UpNEA acquires a 3-axis accelerometer signal, a photoplethysmography (PPG), and a peripheral oxygen saturation (SpO2) signal from the index finger. Overnight recordings are sent from the hardware to a mobile application and then transferred to AeneA. After cloud computing, the results are shown in a web application, accessible for the user and the clinician. The AeneA sleep monitoring activity performs different tasks: sleep stages classification and oxygen desaturation assessment; heart rate and respiration rate estimation; tachycardia, bradycardia, atrial fibrillation, and premature ventricular contraction detection; and apnea and hypopnea identification and classification. The PPG breathing rate estimation algorithm showed an absolute median error of 0.5 breaths per minute for the 32 s window and 0.2 for the 64 s window. The apnea and hypopnea detection algorithm showed an accuracy (Acc) of 75.1%, by windowing the PPG in one-minute segments. The classification task revealed 92.6% Acc in separating central from obstructive apnea, 83.7% in separating central apnea from central hypopnea and 82.7% in separating obstructive apnea from obstructive hypopnea. The novelty of the integrated algorithms and the top-notch cloud computing products deployed, encourage the production of the proposed solution for home sleep monitoring.}, } @article {pmid34883895, year = {2021}, author = {Guo, K and Liu, C and Zhao, S and Lu, J and Zhang, S and Yang, H}, title = {Design of a Millimeter-Wave Radar Remote Monitoring System for the Elderly Living Alone Using WIFI Communication.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {23}, pages = {}, pmid = {34883895}, issn = {1424-8220}, support = {2017YFB1304103//national key R&D program of china/ ; }, mesh = {Aged ; Algorithms ; Communication ; Heart Rate ; Home Environment ; Humans ; *Radar ; *Signal Processing, Computer-Assisted ; }, abstract = {In response to the current demand for the remote monitoring of older people living alone, a non-contact human vital signs monitoring system based on millimeter wave radar has gradually become the object of research. 
This paper investigates methods for detecting human breathing and heartbeat signals using a frequency-modulated continuous wave system. We completed a portable millimeter-wave radar module for wireless communication. The radar module was small and had a WiFi communication interface, so we only needed to provide a power cord for the radar module. The breathing and heartbeat signals were detected and separated by an FIR digital filter and the wavelet transform method. By building a cloud computing framework, we realized remote, unobtrusive monitoring of the vital signs of older people living alone. Experiments were also carried out to compare the performance of the system with that of a common contact detection system. The experimental results showed that the life parameter detection system based on the millimeter wave sensor has strong real-time performance and accuracy.}, } @article {pmid34883857, year = {2021}, author = {Akram, J and Tahir, A and Munawar, HS and Akram, A and Kouzani, AZ and Mahmud, MAP}, title = {Cloud- and Fog-Integrated Smart Grid Model for Efficient Resource Utilisation.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {23}, pages = {}, pmid = {34883857}, issn = {1424-8220}, mesh = {Algorithms ; *Cloud Computing ; *Computer Systems ; Reproducibility of Results ; }, abstract = {The smart grid (SG) is a contemporary electrical network that enhances the network's performance, reliability, stability, and energy efficiency. The integration of cloud and fog computing with SG can increase its efficiency. The combination of SG with cloud computing enhances resource allocation. To minimise the burden on the Cloud and optimise resource allocation, the concept of fog computing integration with cloud computing is presented. Fog has three essential functionalities: location awareness, low latency, and mobility. We offer a cloud and fog-based architecture for information management in this study. By allocating virtual machines (VMs) using a load-balancing mechanism, fog computing makes the system more efficient. We proposed a novel approach based on binary particle swarm optimisation with inertia weight adjusted using simulated annealing. The technique is named BPSOSA. Inertia weight is an important factor in BPSOSA; it adjusts the size of the search space for finding the optimal solution. The BPSOSA technique is compared against the round robin, odds algorithm, and ant colony optimisation. In terms of response time, BPSOSA outperforms round robin, odds algorithm, and ant colony optimisation by 53.99 ms, 82.08 ms, and 81.58 ms, respectively. In terms of processing time, BPSOSA outperforms round robin, odds algorithm, and ant colony optimisation by 52.94 ms, 81.20 ms, and 80.56 ms, respectively.
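The BPSOSA technique just described combines two standard ingredients: sigmoid-transfer binary PSO updates and an annealing-style schedule that shrinks the inertia weight, and hence the search space, over time. The sketch below is a toy illustration of those two mechanisms on a hypothetical two-VM task-assignment encoding; the fitness function, coefficients, and cooling schedule are assumptions, not the authors' values.

```python
import numpy as np

rng = np.random.default_rng(1)
n_particles, n_tasks, n_iter = 20, 12, 100

def fitness(bits):
    """Toy load-balance objective: penalise an uneven split of tasks
    between two fog VMs (an illustrative stand-in for response time)."""
    return abs(bits.sum() - len(bits) / 2)

pos = rng.integers(0, 2, (n_particles, n_tasks))       # binary task-to-VM map
vel = np.zeros((n_particles, n_tasks))
pbest = pos.copy()
pbest_f = np.array([fitness(p) for p in pos])
gbest = pbest[pbest_f.argmin()].copy()

w, w_min, temp, cool = 0.9, 0.4, 1.0, 0.97
for _ in range(n_iter):
    r1, r2 = rng.random(pos.shape), rng.random(pos.shape)
    vel = w * vel + 2.0 * r1 * (pbest - pos) + 2.0 * r2 * (gbest - pos)
    prob = 1.0 / (1.0 + np.exp(-vel))                  # sigmoid transfer function
    pos = (rng.random(pos.shape) < prob).astype(int)   # binary PSO position update
    f = np.array([fitness(p) for p in pos])
    improved = f < pbest_f
    pbest[improved], pbest_f[improved] = pos[improved], f[improved]
    gbest = pbest[pbest_f.argmin()].copy()
    # simulated-annealing-style schedule: as the temperature cools,
    # the inertia weight shrinks and the search narrows
    temp *= cool
    w = w_min + (0.9 - w_min) * temp

print("best assignment:", gbest, "fitness:", pbest_f.min())
```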
Compared to BPSOSA, ant colony optimisation has slightly better cost efficiency; however, the difference is insignificant.}, } @article {pmid34883848, year = {2021}, author = {Bravo-Arrabal, J and Toscano-Moreno, M and Fernandez-Lozano, JJ and Mandow, A and Gomez-Ruiz, JA and García-Cerezo, A}, title = {The Internet of Cooperative Agents Architecture (X-IoCA) for Robots, Hybrid Sensor Networks, and MEC Centers in Complex Environments: A Search and Rescue Case Study.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {23}, pages = {}, pmid = {34883848}, issn = {1424-8220}, support = {RTI2018-093421-B-I00//Spanish Ministerio de Ciencia, Innovación y Universidades, Gobierno de España/ ; BES-2016-077022//"Spanish Predoctoral Grant from the Subprograma Estatal de Formaci\'on del MICINN" co-financed by the European Social Fund./ ; C007/18-SP//"Piloto 5G Andalucía" initiative, promoted by the Ministerio de Asuntos Económicos y Transformación Digital, through Red.es, being developed by Vodafone and Huawei/ ; }, mesh = {*Disasters ; Feedback ; Humans ; *Internet of Things ; Rescue Work ; *Robotics ; }, abstract = {Cloud robotics and advanced communications can foster a step-change in cooperative robots and hybrid wireless sensor networks (H-WSN) for demanding environments (e.g., disaster response, mining, demolition, and nuclear sites) by enabling the timely sharing of data and computational resources between robot and human teams. However, the operational complexity of such multi-agent systems requires defining effective architectures, coping with implementation details, and testing in realistic deployments. This article proposes X-IoCA, an Internet of robotic things (IoRT) and communication architecture consisting of a hybrid and heterogeneous network of wireless transceivers (H2WTN), based on LoRa and BLE technologies, and a robot operating system (ROS) network. The IoRT is connected to a feedback information system (FIS) distributed among multi-access edge computing (MEC) centers. Furthermore, we present SAR-IoCA, an implementation of the architecture for search and rescue (SAR) integrated into a 5G network. The FIS for this application consists of an SAR-FIS (including a path planner for UGVs considering risks detected by a LoRa H-WSN) and an ROS-FIS (for real-time monitoring and processing of information published throughout the ROS network). Moreover, we discuss lessons learned from using SAR-IoCA in a realistic exercise where three UGVs, a UAV, and responders collaborated to rescue victims from a tunnel accessible through rough terrain.}, } @article {pmid34883819, year = {2021}, author = {Huang, CE and Li, YH and Aslam, MS and Chang, CC}, title = {Super-Resolution Generative Adversarial Network Based on the Dual Dimension Attention Mechanism for Biometric Image Super-Resolution.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {23}, pages = {}, pmid = {34883819}, issn = {1424-8220}, mesh = {*Biometry ; Humans ; *Image Processing, Computer-Assisted ; Research Design ; }, abstract = {There exist many types of intelligent security sensors in the environment of the Internet of Things (IoT) and cloud computing. Among them, biometric sensors are one of the most important types. Biometric sensors capture the physiological or behavioral features of a person, which can be further processed with cloud computing to verify or identify the user.
However, a low-resolution (LR) biometric image causes the loss of feature details and substantially reduces the recognition rate. Moreover, the lack of resolution negatively affects the performance of image-based biometric technology. From a practical perspective, most IoT devices suffer from hardware constraints, and low-cost equipment may not be able to meet various requirements, particularly for image resolution, because it requires additional storage to store high-resolution (HR) images and high bandwidth to transmit them. Therefore, how to achieve high accuracy for the biometric system without using expensive image sensors is an interesting and valuable issue in the field of intelligent security sensors. In this paper, we propose DDA-SRGAN, which is a generative adversarial network (GAN)-based super-resolution (SR) framework using the dual-dimension attention mechanism. The proposed model can be trained to discover the regions of interest (ROI) automatically in the LR images without any given prior knowledge. The experiments were performed on the CASIA-Thousand-v4 and the CelebA datasets. The experimental results show that the proposed method is able to learn the details of features in crucial regions and achieve better performance in most cases.}, } @article {pmid34883778, year = {2021}, author = {Erhan, L and Di Mauro, M and Anjum, A and Bagdasar, O and Song, W and Liotta, A}, title = {Embedded Data Imputation for Environmental Intelligent Sensing: A Case Study.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {23}, pages = {}, pmid = {34883778}, issn = {1424-8220}, mesh = {*Cloud Computing ; *Machine Learning ; }, abstract = {Recent developments in cloud computing and the Internet of Things have enabled smart environments, in terms of both monitoring and actuation. Unfortunately, this often results in unsustainable cloud-based solutions, whereby, in the interest of simplicity, a wealth of raw (unprocessed) data are pushed from sensor nodes to the cloud. Herein, we advocate the use of machine learning at sensor nodes to perform essential data-cleaning operations, to avoid the transmission of corrupted (often unusable) data to the cloud. Starting from a public pollution dataset, we investigate how two machine learning techniques (kNN and missForest) may be embedded on Raspberry Pi to perform data imputation, without impacting the data collection process. Our experimental results demonstrate the accuracy and computational efficiency of edge-learning methods for filling in missing data values in corrupted data series. We find that kNN and missForest correctly impute up to 40% of randomly distributed missing values, with a density distribution of values that is indistinguishable from the benchmark. We also show a trade-off analysis for the case of bursty missing values, with recoverable blocks of up to 100 samples.
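The edge-imputation study just cited (Erhan et al.) embeds kNN imputation on a Raspberry Pi. A minimal sketch of the same operation with scikit-learn's KNNImputer is shown below; the synthetic sensor matrix and the 40% random-missing rate mirror the setting described in the abstract, but the data and parameter values are stand-ins.

```python
import numpy as np
from sklearn.impute import KNNImputer

rng = np.random.default_rng(42)

# stand-in for a pollution time series: rows = samples, cols = sensors
X = rng.normal(size=(500, 4)).cumsum(axis=0)

# knock out 40% of the values at random, as in the random-missing case
mask = rng.random(X.shape) < 0.4
X_missing = X.copy()
X_missing[mask] = np.nan

# kNN imputation: each gap is filled from the k most similar rows
imputer = KNNImputer(n_neighbors=5)
X_filled = imputer.fit_transform(X_missing)

rmse = np.sqrt(np.mean((X_filled[mask] - X[mask]) ** 2))
print(f"RMSE on imputed entries: {rmse:.3f}")
```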
Computation times are shorter than sampling periods, allowing for data imputation at the edge in a timely manner.}, } @article {pmid34876968, year = {2021}, author = {Liu, S and Jiang, L and Wang, X}, title = {Intelligent Internet of Things Medical Technology in Implantable Intravenous Infusion Port in Children with Malignant Tumors.}, journal = {Journal of healthcare engineering}, volume = {2021}, number = {}, pages = {8936820}, pmid = {34876968}, issn = {2040-2309}, mesh = {Child ; Cloud Computing ; Humans ; Infusions, Intravenous ; Internet ; *Internet of Things ; *Neoplasms/drug therapy ; Technology ; }, abstract = {Due to the recent technological revolution that is centered around information technology, the Internet of Medical Things (IoMT) has become an important research domain. IoMT is a combination of Internet of Things (IoT), big data, cloud computing, ubiquitous network, and three-dimensional holographic technology, which is used to build a smart medical diagnosis and treatment system. Additionally, this system should automate various activities, such as the patient's health record and health monitoring, which is an important issue in the development of modern and smart healthcare system. In this paper, we have thoroughly examined the role of a smart healthcare system architecture and other key supporting technologies in improving the health status of both indoor and outdoor patients. The proposed system has the capacity to investigate and predict (if feasible) the clinical application and nursing effects of totally implantable intravenous port (TIVAP) in pediatric hematological tumors. For this purpose, seventy children with hematologic tumors were treated with TIVAP, and IoMT-enabled care was provided to them, where the occurrence of adverse events, specifically after the treatment, was observed. The experimental results collected after the 70 children were treated and cared for by TIVAP show that there were five cases of adverse events, whereas the incidence rate of the adverse events was 7.14%. Moreover, TIVAP has significant efficacy in the treatment of hematologic tumors in children, and it equally reduces the vascular injury caused by chemotherapy in younger patients. Likewise, targeted care reduces the incidence of adverse events in children with expected ratio.}, } @article {pmid34876786, year = {2021}, author = {Hardy, NP and Cahill, RA}, title = {Digital surgery for gastroenterological diseases.}, journal = {World journal of gastroenterology}, volume = {27}, number = {42}, pages = {7240-7246}, pmid = {34876786}, issn = {2219-2840}, mesh = {Algorithms ; Artificial Intelligence ; *Gastroenterology ; Humans ; Machine Learning ; *Robotics ; }, abstract = {Advances in machine learning, computer vision and artificial intelligence methods, in combination with those in processing and cloud computing capability, portend the advent of true decision support during interventions in real-time and soon perhaps in automated surgical steps. Such capability, deployed alongside technology intraoperatively, is termed digital surgery and can be delivered without the need for high-end capital robotic investment. An area close to clinical usefulness right now harnesses advances in near infrared endolaparoscopy and fluorescence guidance for tissue characterisation through the use of biophysics-inspired algorithms. This represents a potential synergistic methodology for the deep learning methods currently advancing in ophthalmology, radiology, and recently gastroenterology via colonoscopy. 
As databanks of more general surgical videos are created, greater analytic insights can be derived across the operative spectrum of gastroenterological disease and operations (including instrumentation and operative step sequencing and recognition, followed over time by surgeon and instrument performance assessment) and linked to value-based outcomes. However, issues of legality, ethics and even morality need consideration, as do the limiting effects of monopolies, cartels and isolated data silos. Furthermore, the role of the surgeon, surgical societies and healthcare institutions in this evolving field needs active deliberation, as the default risks relegation to bystander or passive recipient. This editorial provides insight into this accelerating field by illuminating the near-future and next decade evolutionary steps towards widespread clinical integration for patient and societal benefit.}, } @article {pmid34849397, year = {2021}, author = {Karim, HMR and Singha, SK and Neema, PK and Baruah, TD and Ray, R and Mohanty, D and Siddiqui, MS and Nanda, R and Bodhey, NK}, title = {Information technology-based joint preoperative assessment, risk stratification and its impact on patient management, perioperative outcome, and cost.}, journal = {Discoveries (Craiova, Romania)}, volume = {9}, number = {2}, pages = {e130}, pmid = {34849397}, issn = {2359-7232}, abstract = {BACKGROUND: Despite negative recommendations, routine preoperative testing practice is nearly universal. Our aim is to bring healthcare providers onto one platform by using information technology-based preanaesthetic assessment and to evaluate the impact of routine preoperative testing on patient outcomes and cost.

METHODS: A prospective, non-randomised study was conducted in a teaching hospital during January 2019-August 2020. Locally developed software and cloud computing were used as tools to modify the preanaesthesia evaluation. The number of investigations ordered, time taken, and cost incurred were compared with routine practice. Further data were matched as per surgical invasiveness and the patient's physical status. Appropriate tests compared intergroup differences, and p < 0.05 was considered significant.

RESULTS: Data from 114 patients (58 in routine and 56 in patient- and surgery-specific) were analysed. Patient- and surgery-specific investigation led to a reduction in investigations by 80-90%, hospital visits by 50%, and the total cost by 80%, without increasing day-of-surgery cancellations or complications.

CONCLUSION: Information technology-based joint preoperative assessment and risk stratification are feasible through locally developed software with minimal cost. It helps in applying patient- and surgery-specific investigation, reducing the number of tests, hospital visits, and cost, without adversely affecting the perioperative outcome. The application of the modified method will help in cost-effective, yet quality and safe perioperative healthcare delivery. It will also benefit the public from both service and economic perspectives.}, } @article {pmid34848776, year = {2021}, author = {Yan, X and Wang, J}, title = {Dynamic monitoring of urban built-up object expansion trajectories in Karachi, Pakistan with time series images and the LandTrendr algorithm.}, journal = {Scientific reports}, volume = {11}, number = {1}, pages = {23118}, pmid = {34848776}, issn = {2045-2322}, abstract = {In the complex process of urbanization, retrieving its dynamic expansion trajectories with an efficient method is challenging, especially for urban regions that are not clearly distinguished from the surroundings in arid regions. In this study, we propose a framework for extracting spatiotemporal change information on urban disturbances. First, the urban built-up object areas in 2000 and 2020 were obtained using an object-oriented segmentation method. Second, we applied the LandTrendr (LT) algorithm and multiple bands/indices to extract annual spatiotemporal information. This process was implemented effectively with the support of the cloud computing platform of Earth Observation big data. The overall accuracy of time information extraction, the kappa coefficient, and average detection error were 83.76%, 0.79, and 0.57 years, respectively. These results show that Karachi expanded continuously during 2000-2020, with an average annual growth rate of 4.7%. However, this expansion was not spatiotemporally balanced. The coastal area developed quickly within a shorter duration, whereas the main newly added urban regions are located in the northern and eastern inland areas. This study demonstrated an effective framework for extracting the dynamic spatiotemporal change information of urban built-up objects while substantially eliminating the salt-and-pepper effect of pixel-based detection. Methods used in our study are broadly applicable to the monitoring of other disturbances caused by natural or human activities.}, } @article {pmid34847040, year = {2023}, author = {Ni, Z and Chen, H and Li, Z and Wang, X and Yan, N and Liu, W and Xia, F}, title = {MSCET: A Multi-Scenario Offloading Schedule for Biomedical Data Processing and Analysis in Cloud-Edge-Terminal Collaborative Vehicular Networks.}, journal = {IEEE/ACM transactions on computational biology and bioinformatics}, volume = {20}, number = {4}, pages = {2376-2386}, doi = {10.1109/TCBB.2021.3131177}, pmid = {34847040}, issn = {1557-9964}, abstract = {With the rapid development of Artificial Intelligence (AI) and Internet of Things (IoTs), an increasing number of computation intensive or delay sensitive biomedical data processing and analysis tasks are produced in vehicles, bringing more and more challenges to the biometric monitoring of drivers. Edge computing is a new paradigm to solve these challenges by offloading tasks from the resource-limited vehicles to Edge Servers (ESs) in Road Side Units (RSUs). However, most of the traditional offloading schedules for vehicular networks concentrate on the edge, while some tasks may be too complex for ESs to process.
To this end, we consider a collaborative vehicular network in which the cloud, edge and terminal can cooperate with each other to accomplish the tasks. The vehicles can offload the computation intensive tasks to the cloud to save the resource of edge. We further construct the virtual resource pool which can integrate the resource of multiple ESs since some regions may be covered by multiple RSUs. In this paper, we propose a Multi-Scenario offloading schedule for biomedical data processing and analysis in Cloud-Edge-Terminal collaborative vehicular networks called MSCET. The parameters of the proposed MSCET are optimized to maximize the system utility. We also conduct extensive simulations to evaluate the proposed MSCET and the results illustrate that MSCET outperforms other existing schedules.}, } @article {pmid34841104, year = {2021}, author = {Samudra, Y and Ahmad, T}, title = {Improved prediction error expansion and mirroring embedded samples for enhancing reversible audio data hiding.}, journal = {Heliyon}, volume = {7}, number = {11}, pages = {e08381}, pmid = {34841104}, issn = {2405-8440}, abstract = {Many applications work by processing either small or big data, including sensitive and confidential ones, through computer networks like cloud computing. However, many systems are public and may not provide enough security mechanisms. Meanwhile, once the data are compromised, the security and privacy of the users will suffer from serious problems. Therefore, security protection is much required in various aspects, and one of how it is done is by embedding the data (payload) in another form of data (cover) such as audio. However, the existing methods do not provide enough space to accommodate the payload, so bigger data can not be taken; the quality of the respective generated data is relatively low, making it much different from its corresponding cover. This research works on these problems by improving a prediction error expansion-based algorithm and designing a mirroring embedded sample scheme. Here, a processed audio sample is forced to be as close as possible to the original one. The experimental results show that this proposed method produces a higher quality of stego data considering the size of the payloads. It achieves more than 100 dB, which is higher than that of the compared algorithms. Additionally, this proposed method is reversible, which means that both the original payload and the audio cover can be fully reconstructed.}, } @article {pmid34833792, year = {2021}, author = {Shah, SC}, title = {Design of a Machine Learning-Based Intelligent Middleware Platform for a Heterogeneous Private Edge Cloud System.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {22}, pages = {}, pmid = {34833792}, issn = {1424-8220}, mesh = {*Algorithms ; *Cloud Computing ; Machine Learning ; Privacy ; }, abstract = {Recent advances in mobile technologies have facilitated the development of a new class of smart city and fifth-generation (5G) network applications. These applications have diverse requirements, such as low latencies, high data rates, significant amounts of computing and storage resources, and access to sensors and actuators. A heterogeneous private edge cloud system was proposed to address the requirements of these applications. The proposed heterogeneous private edge cloud system is characterized by a complex and dynamic multilayer network and computing infrastructure. 
Efficient management and utilization of this infrastructure may increase data rates and reduce data latency, data privacy risks, and traffic to the core Internet network. A novel intelligent middleware platform is proposed in the current study to manage and utilize heterogeneous private edge cloud infrastructure efficiently. The proposed platform aims to provide computing, data collection, and data storage services to support emerging resource-intensive and non-resource-intensive smart city and 5G network applications. It aims to leverage regression analysis and reinforcement learning methods to solve the problem of efficiently allocating heterogeneous resources to application tasks. This platform adopts parallel transmission techniques, dynamic interface allocation techniques, and machine learning-based algorithms in a dynamic multilayer network infrastructure to improve network and application performance. Moreover, it uses container and device virtualization technologies to address problems related to heterogeneous hardware and execution environments.}, } @article {pmid34833723, year = {2021}, author = {Fatima, M and Nisar, MW and Rashid, J and Kim, J and Kamran, M and Hussain, A}, title = {A Novel Fingerprinting Technique for Data Storing and Sharing through Clouds.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {22}, pages = {}, pmid = {34833723}, issn = {1424-8220}, support = {S3033853 and No.2020R1I1A3069700//National Research Foundation of Korea/ ; }, mesh = {Cloud Computing ; *Computer Security ; Confidentiality ; *Electronic Health Records ; Privacy ; Technology ; }, abstract = {With the emerging growth of digital data in information systems, technology faces the challenge of knowledge prevention, ownership rights protection, security, and privacy measurement of valuable and sensitive data. On-demand availability of various data as services in a shared and automated environment has become a reality with the advent of cloud computing. The digital fingerprinting technique has been adopted as an effective solution to protect the copyright and privacy of digital properties from illegal distribution and identification of malicious traitors over the cloud. Furthermore, it is used to trace the unauthorized distribution and the user of multimedia content distributed through the cloud. In this paper, we propose a novel fingerprinting technique for the cloud environment to protect numeric attributes in relational databases for digital privacy management. The proposed solution with the novel fingerprinting scheme is robust and efficient. It can address challenges such as embedding secure data over the cloud, essential to secure relational databases. The proposed technique provides a decoding accuracy of 100%, 90%, and 40% for 10% to 30%, 40%, and 50% of deleted records.}, } @article {pmid34831620, year = {2021}, author = {Huang, C and Yang, Q and Huang, W}, title = {Analysis of the Spatial and Temporal Changes of NDVI and Its Driving Factors in the Wei and Jing River Basins.}, journal = {International journal of environmental research and public health}, volume = {18}, number = {22}, pages = {}, pmid = {34831620}, issn = {1660-4601}, mesh = {China ; *Climate Change ; Human Activities ; Humans ; *Rivers ; Seasons ; Temperature ; }, abstract = {This study aimed to explore the long-term vegetation cover change and its driving factors in the typical watershed of the Yellow River Basin. 
This research was based on the Google Earth Engine (GEE), a remote sensing cloud platform, and used the Landsat surface reflectance datasets and the Pearson correlation method to analyze the vegetation conditions in the areas above Xianyang on the Wei River and above Zhangjiashan on the Jing River. Random forest and decision tree models were used to analyze the effects of various climatic factors (precipitation, temperature, soil moisture, evapotranspiration, and drought index) on NDVI (normalized difference vegetation index). Then, based on the residual analysis method, the effects of human activities on NDVI were explored. The results showed that: (1) From 1987 to 2018, the NDVI of the two watersheds showed an increasing trend; in particular, after 2008, the average increase rate of NDVI in the growing season (April to September) increased from 0.0032/a and 0.003/a in the base period (1987-2008) to 0.0172/a and 0.01/a in the measurement period (2008-2018), for the Wei and Jing basins, respectively. In addition, the NDVI significantly increased from 21.78% and 31.32% in the baseline period (1987-2008) to 83.76% and 92.40% in the measurement period (2008-2018), respectively. (2) The random forest and classification and regression tree model (CART) can assess the contribution and sensitivity of various climate factors to NDVI. Precipitation, soil moisture, and temperature were found to be the three main factors that affect the NDVI of the study area, and their contributions were 37.05%, 26.42%, and 15.72%, respectively. The changes in precipitation and soil moisture in the entire Jing River Basin and the upper and middle reaches of the Wei River above Xianyang caused significant changes in NDVI. Furthermore, changes in precipitation and temperature led to significant changes in NDVI in the lower reaches of the Wei River. (3) The impact of human activities in the Wei and Jing basins on NDVI has gradually changed from negative to positive, which is mainly due to the implementation of soil and water conservation measures. The proportions of areas with positive effects of human activities were 80.88% and 81.95%, of which the proportions of areas with significant positive effects were 11.63% and 7.76%, respectively. These are mainly distributed in the upper reaches of the Wei River and the western and eastern regions of the Jing River. These areas are the key areas where soil and water conservation measures have been implemented in recent years, and the corresponding land use has transformed from cultivated land to forest and grassland. The negative effects accounted for 1.66% and 0.10% of the area, respectively, and were mainly caused by urban expansion and coal mining.}, } @article {pmid34828597, year = {2021}, author = {Bhatia, S and Malhotra, J}, title = {Morton Filter-Based Security Mechanism for Healthcare System in Cloud Computing.}, journal = {Healthcare (Basel, Switzerland)}, volume = {9}, number = {11}, pages = {}, pmid = {34828597}, issn = {2227-9032}, abstract = {Electronic health records contain the patient's sensitive information. If these data are acquired by a malicious user, it will not only cause the pilferage of the patient's personal data but also affect the diagnosis and treatment. One of the most challenging tasks in cloud-based healthcare systems is to provide security and privacy to electronic health records. Various probabilistic data structures and watermarking techniques were used in the cloud-based healthcare systems to secure patient's data. 
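Both Earth Engine studies above (the Karachi LandTrendr analysis and the Wei/Jing NDVI analysis) follow the same cloud-side pattern: filter an image collection, map a band-math function over it, and reduce the result over a region. A minimal sketch with the Earth Engine Python API is given below; it assumes an authenticated Earth Engine account, and the rectangle, dataset, dates, and scale are placeholders rather than either paper's study configuration.

```python
import ee

ee.Initialize()  # assumes `earthengine authenticate` has been run for this account

# placeholder region: a rectangle roughly over the middle Wei River basin
region = ee.Geometry.Rectangle([107.0, 34.0, 109.0, 35.5])

def add_ndvi(img):
    """Append an NDVI band computed from Landsat 8 surface-reflectance bands."""
    ndvi = img.normalizedDifference(["SR_B5", "SR_B4"]).rename("NDVI")
    return img.addBands(ndvi)

col = (
    ee.ImageCollection("LANDSAT/LC08/C02/T1_L2")
    .filterBounds(region)
    .filterDate("2018-04-01", "2018-09-30")   # one growing season
    .map(add_ndvi)
)

# growing-season mean NDVI over the region, computed server-side in the cloud
mean_ndvi = (
    col.select("NDVI")
    .mean()
    .reduceRegion(ee.Reducer.mean(), region, scale=500)
    .get("NDVI")
)
print("mean growing-season NDVI:", mean_ndvi.getInfo())
```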
Most of the existing studies focus on cuckoo and bloom filters, without considering their throughputs. In this research, a novel cloud security mechanism is introduced, which supersedes the shortcomings of existing approaches. The proposed solution enhances security with methods such as fragile watermark, least significant bit replacement watermarking, class reliability factor, and Morton filters included in the formation of the security mechanism. A Morton filter is an approximate set membership data structure (ASMDS) that provides many improvements over other data structures, such as cuckoo, bloom, semi-sorting cuckoo, and rank and select quotient filters. The Morton filter improves security; it supports insertion, deletion, and lookup operations and improves their respective throughputs by 0.9× to 15.5×, 1.3× to 1.6×, and 1.3× to 2.5×, when compared to cuckoo filters. We used Hadoop version 0.20.3, and the platform was Red Hat Enterprise Linux 6; we executed five experiments, and the average of the results has been taken. The results of the simulation work show that our proposed security mechanism provides an effective solution for secure data storage in cloud-based healthcare systems, with a load factor of 0.9. Furthermore, to aid cloud security in healthcare systems, we presented the motivation, objectives, related works, major research gaps, and materials and methods; we, thus, presented and implemented a cloud security mechanism, in the form of an algorithm and a set of results and conclusions.}, } @article {pmid34823545, year = {2021}, author = {Wilson, PH and Rogers, JM and Vogel, K and Steenbergen, B and McGuckian, TB and Duckworth, J}, title = {Home-based (virtual) rehabilitation improves motor and cognitive function for stroke patients: a randomized controlled trial of the Elements (EDNA-22) system.}, journal = {Journal of neuroengineering and rehabilitation}, volume = {18}, number = {1}, pages = {165}, pmid = {34823545}, issn = {1743-0003}, mesh = {Adult ; Australia ; Cognition ; Humans ; Recovery of Function ; *Stroke ; *Stroke Rehabilitation/methods ; Treatment Outcome ; Upper Extremity ; }, abstract = {BACKGROUND: Home-based rehabilitation of arm function is a significant gap in service provision for adult stroke. The EDNA-22 tablet is a portable virtual rehabilitation-based system that provides a viable option for home-based rehabilitation using a suite of tailored movement tasks, and performance monitoring via cloud computing data storage. The study reported here aimed to compare use of the EDNA system with an active control (Graded Repetitive Arm Supplementary Program-GRASP training) group using a parallel RCT design.

METHODS: Of 19 originally randomized, 17 acute-care patients with upper-extremity dysfunction following unilateral stroke completed training in either the treatment (n = 10) or active control groups (n = 7), each receiving 8-weeks of in-home training involving 30-min sessions scheduled 3-4 times weekly. Performance was assessed across motor, cognitive and functional behaviour in the home. Primary motor measures, collected by a blinded assessor, were the Box and Blocks Task (BBT) and 9-Hole Pegboard Test (9HPT), and for cognition the Montreal Cognitive Assessment (MoCA). Functional behaviour was assessed using the Stroke Impact Scale (SIS) and Neurobehavioural Functioning Inventory (NFI).

RESULTS: One participant from each group withdrew for personal reasons. No adverse events were reported. Results showed a significant and large improvement in performance on the BBT for the more-affected hand in the EDNA training group only (g = 0.90). There was a mild-to-moderate effect of training on the 9HPT for EDNA (g = 0.55) and control (g = 0.42) groups, again for the more affected hand. In relation to cognition, performance on the MoCA improved for the EDNA group (g = 0.70). Finally, the EDNA group showed moderate (but non-significant) improvement in functional behaviour on the SIS (g = 0.57) and NFI (g = 0.49).

CONCLUSION: A short course of home-based training using the EDNA-22 system can yield significant gains in motor and cognitive performance, over and above an active control training that also targets upper-limb function. Intriguingly, these changes in performance were corroborated only tentatively in the reports of caregivers. We suggest that future research consider how the implementation of home-based rehabilitation technology can be optimized. We contend that self-administered digitally-enhanced training needs to become part of the health literacy of all stakeholders who are impacted by stroke and other acquired brain injuries. Trial registration Australian New Zealand Clinical Trials Registry (ANZCTR) Number: ACTRN12619001557123. Registered 12 November 2019, http://www.anzctr.org.au/Trial/Registration/TrialReview.aspx?id=378298&isReview=true.}, } @article {pmid34817058, year = {2021}, author = {Suvakov, M and Panda, A and Diesh, C and Holmes, I and Abyzov, A}, title = {CNVpytor: a tool for copy number variation detection and analysis from read depth and allele imbalance in whole-genome sequencing.}, journal = {GigaScience}, volume = {10}, number = {11}, pages = {}, pmid = {34817058}, issn = {2047-217X}, support = {U24 CA220242/CA/NCI NIH HHS/United States ; }, mesh = {Alleles ; *DNA Copy Number Variations ; Genomics ; High-Throughput Nucleotide Sequencing ; Sequence Analysis, DNA ; *Software ; Whole Genome Sequencing ; }, abstract = {BACKGROUND: Detecting copy number variations (CNVs) and copy number alterations (CNAs) based on whole-genome sequencing data is important for personalized genomics and treatment. CNVnator is one of the most popular tools for CNV/CNA discovery and analysis based on read depth.

FINDINGS: Herein, we present an extension of CNVnator developed in Python: CNVpytor. CNVpytor inherits the reimplemented core engine of its predecessor and extends visualization, modularization, performance, and functionality. Additionally, CNVpytor uses B-allele frequency likelihood information from single-nucleotide polymorphisms and small indel data as additional evidence for CNVs/CNAs and as primary information for copy number-neutral losses of heterozygosity.
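CNVpytor's read-depth signal boils down to binning per-base depth and flagging bins that deviate from the copy-neutral baseline. The following is a toy, self-contained illustration of that principle only; it is not CNVpytor's API or algorithm, and the simulated depths, bin size, and 3-sigma threshold are arbitrary choices.

```python
import numpy as np

rng = np.random.default_rng(7)

# simulated per-base read depth: diploid mean ~30x, with a heterozygous
# deletion (~15x) and a duplication (~45x) planted in known spans
depth = rng.poisson(30, 1_000_000).astype(float)
depth[200_000:250_000] = rng.poisson(15, 50_000)
depth[600_000:640_000] = rng.poisson(45, 40_000)

bin_size = 1_000
bins = depth[: len(depth) // bin_size * bin_size].reshape(-1, bin_size).mean(axis=1)

mean, std = bins.mean(), bins.std()
copy_ratio = bins / mean                       # ~1.0 for copy-neutral bins

# flag runs of bins whose binned depth departs from the mean by > 3 sigma
flagged = np.abs(bins - mean) > 3 * std
for start in np.flatnonzero(flagged & ~np.roll(flagged, 1)):
    end = start
    while end + 1 < len(flagged) and flagged[end + 1]:
        end += 1
    kind = "deletion" if copy_ratio[start] < 1 else "duplication"
    print(f"{kind}: bins {start}-{end}, mean copy ratio "
          f"{copy_ratio[start:end + 1].mean():.2f}")
```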

CONCLUSIONS: CNVpytor is significantly faster than CNVnator, particularly for parsing alignment files (2-20 times faster), and produces 20-50 times smaller intermediate files. CNV calls can be filtered using several criteria, annotated, and merged over multiple samples. Its modular architecture allows it to be used in shared and cloud environments such as Google Colab and Jupyter notebook. Data can be exported into JBrowse, while a lightweight plugin version of CNVpytor for JBrowse enables nearly instant and GUI-assisted analysis of CNVs by any user. CNVpytor releases and the source code are available on GitHub at https://github.com/abyzovlab/CNVpytor under the MIT license.}, } @article {pmid34814342, year = {2021}, author = {Shamshirband, S and Joloudari, JH and Shirkharkolaie, SK and Mojrian, S and Rahmani, F and Mostafavi, S and Mansor, Z}, title = {Game theory and evolutionary optimization approaches applied to resource allocation problems in computing environments: A survey.}, journal = {Mathematical biosciences and engineering : MBE}, volume = {18}, number = {6}, pages = {9190-9232}, doi = {10.3934/mbe.2021453}, pmid = {34814342}, issn = {1551-0018}, mesh = {Cloud Computing ; Computers ; *Game Theory ; *Internet of Things ; Resource Allocation ; }, abstract = {Today's intelligent computing environments, including the Internet of Things (IoT), Cloud Computing (CC), Fog Computing (FC), and Edge Computing (EC), allow many organizations worldwide to optimize their resource allocation regarding the quality of service and energy consumption. Due to the acute conditions of utilizing resources by users and the real-time nature of the data, a comprehensive and integrated computing environment has not yet provided a robust and reliable capability for proper resource allocation. Although traditional resource allocation approaches in a low-capacity hardware resource system are efficient for small-scale resource providers, for a complex system in the conditions of dynamic computing resources and fierce competition in obtaining resources, they cannot develop and adaptively manage the conditions optimally. To optimize the resource allocation with minimal delay, low energy consumption, minimum computational complexity, high scalability, and better resource utilization efficiency, CC/FC/EC/IoT-based computing architectures should be designed intelligently. Therefore, the objective of this research is a comprehensive survey on resource allocation problems using computational intelligence-based evolutionary optimization and mathematical game theory approaches in different computing environments according to the latest scientific research achievements.}, } @article {pmid34814341, year = {2021}, author = {Liu, Y and Huang, W and Wang, L and Zhu, Y and Chen, N}, title = {Dynamic computation offloading algorithm based on particle swarm optimization with a mutation operator in multi-access edge computing.}, journal = {Mathematical biosciences and engineering : MBE}, volume = {18}, number = {6}, pages = {9163-9189}, doi = {10.3934/mbe.2021452}, pmid = {34814341}, issn = {1551-0018}, mesh = {*Algorithms ; Mutation ; Probability ; }, abstract = {Current computation offloading algorithms for the mobile cloud ignore the selection of offloading opportunities and do not consider offloading frequency, resource waste, or the loss of energy efficiency caused by a low probability of successful offloading.
Therefore, in this study, a dynamic computation offloading algorithm based on particle swarm optimization with a mutation operator in a multi-access edge computing environment is proposed (DCO-PSOMO). According to the CPU utilization and the memory utilization rate of the mobile terminal, this method can dynamically obtain the overload time by using a robust, locally weighted regression method. After detecting the overload time, the probability of successful offloading is predicted from the mobile user's dwell time and edge computing communication range, and the offloading is either conducted immediately or delayed. A computation offloading model was established via the use of the response time and energy consumption of the mobile terminal. Additionally, the optimal computing offloading algorithm was designed via the use of a particle swarm with a mutation operator. Finally, the DCO-PSOMO algorithm was compared with the JOCAP, ECOMC and ESRLR algorithms, and the experimental results demonstrated that the DCO-PSOMO offloading method can effectively reduce the offloading cost and terminal energy consumption, and improve the success probability of offloading and the user's QoS.}, } @article {pmid34814262, year = {2021}, author = {Al-Zumia, FA and Tian, Y and Al-Rodhaan, M}, title = {A novel fault-tolerant privacy-preserving cloud-based data aggregation scheme for lightweight health data.}, journal = {Mathematical biosciences and engineering : MBE}, volume = {18}, number = {6}, pages = {7539-7560}, doi = {10.3934/mbe.2021373}, pmid = {34814262}, issn = {1551-0018}, mesh = {Algorithms ; Cloud Computing ; *Computer Security ; Confidentiality ; Data Aggregation ; Humans ; *Privacy ; }, abstract = {Mobile health networks (MHNWs) have facilitated instant medical health care and remote health monitoring for patients. Currently, a vast amount of health data needs to be quickly collected, processed and analyzed. The main barrier to doing so is the limited amount of the computational storage resources that are required for MHNWs. Therefore, health data must be outsourced to the cloud. Although the cloud has the benefits of powerful computation capabilities and intensive storage resources, security and privacy concerns exist. Therefore, our study examines how to collect and aggregate these health data securely and efficiently, with a focus on the theoretical importance and application potential of the aggregated data. In this work, we propose a novel design for a private and fault-tolerant cloud-based data aggregation scheme. Our design is based on a future ciphertext mechanism for improving the fault tolerance capabilities of MHNWs. Our scheme is privatized via differential privacy, which is achieved by encrypting noisy health data and enabling the cloud to obtain the results of only the noisy sum. Our scheme is efficient, reliable and secure and combines different approaches and algorithms to improve the security and efficiency of the system. Our proposed scheme is evaluated with an extensive simulation study, and the simulation results show that it is efficient and reliable. The computational cost of our scheme is significantly less than that of the related scheme.
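The aggregation scheme just described lets the cloud learn only a noisy sum of health readings. The sketch below isolates the differential-privacy ingredient in its simplest local-noise form: each device clips its reading and adds Laplace noise before contributing. The encryption and fault-tolerance layers of the actual scheme are omitted, and epsilon and the clipping bound are illustrative values, not the paper's.

```python
import numpy as np

rng = np.random.default_rng(3)

n_devices = 1_000
epsilon = 0.5        # per-round privacy budget (illustrative)
sensitivity = 1.0    # readings clipped to [0, 1], so one user shifts the sum by <= 1

# stand-in health readings, clipped to bound each user's influence on the sum
readings = np.clip(rng.normal(0.6, 0.2, n_devices), 0.0, 1.0)

# each device perturbs its reading with Laplace noise before sending it,
# so the aggregator only ever sees noisy contributions; real schemes often
# split the noise across devices so the total equals one Laplace draw
noisy = readings + rng.laplace(0.0, sensitivity / epsilon, n_devices)

true_sum, noisy_sum = readings.sum(), noisy.sum()
print(f"true sum {true_sum:.1f}, noisy sum {noisy_sum:.1f}, "
      f"relative error {abs(true_sum - noisy_sum) / true_sum:.2%}")
```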
The aggregation error is minimized from $O(\sqrt{w+1})$ in the related scheme to $O(1)$ in our scheme.}, } @article {pmid34812799, year = {2021}, author = {Huang, L and Tian, S and Zhao, W and Liu, K and Ma, X and Guo, J and Yin, M}, title = {5G-Enabled intelligent construction of a chest pain center with up-conversion lateral flow immunoassay.}, journal = {The Analyst}, volume = {146}, number = {24}, pages = {7702-7709}, doi = {10.1039/d1an01592c}, pmid = {34812799}, issn = {1364-5528}, mesh = {Biomarkers ; Cloud Computing ; Early Diagnosis ; Humans ; Immunoassay ; *Myocardial Infarction/diagnosis ; *Pain Clinics ; Smartphone ; Troponin I ; Wireless Technology ; }, abstract = {Acute myocardial infarction (AMI) has become a worldwide health problem because of its rapid onset and high mortality. Cardiac troponin I (cTnI) is the gold standard for diagnosis of AMI, and its rapid and accurate detection is critical for early diagnosis and management of AMI. Using a lateral flow immunoassay with upconverting nanoparticles as fluorescent probes, we developed an up-conversion fluorescence reader capable of rapidly quantifying the cTnI concentration in serum based upon the fluorescence intensity of the test and control lines on the test strip. Reliable detection of cTnI in the range 0.1-50 ng mL[-1] could be achieved in 15 min, with a lower detection limit of 0.1 ng mL[-1]. The reader was also adapted for use on a 5th generation (5G) mobile network enabled intelligent chest pain center. Through Bluetooth wireless communication, the results achieved using the reader on an ambulance heading to a central hospital could be transmitted to a 5G smartphone and uploaded for real-time edge computing and cloud storage. An application in the 5G smartphone allows users to upload their medical information to establish dedicated electronic health records and doctors to monitor patients' health status and provide remote medical services. Combined with mobile internet and big data, the 5G-enabled intelligent chest pain center with up-conversion lateral flow immunoassay may predict the onset of AMI and save valuable time for patients suffering an AMI.}, } @article {pmid34812394, year = {2021}, author = {Navaz, AN and Serhani, MA and El Kassabi, HT and Al-Qirim, N and Ismail, H}, title = {Trends, Technologies, and Key Challenges in Smart and Connected Healthcare.}, journal = {IEEE access : practical innovations, open solutions}, volume = {9}, number = {}, pages = {74044-74067}, pmid = {34812394}, issn = {2169-3536}, abstract = {Cardio Vascular Diseases (CVD) is the leading cause of death globally and is increasing at an alarming rate, according to the American Heart Association's Heart Attack and Stroke Statistics-2021. This increase has been further exacerbated because of the current coronavirus (COVID-19) pandemic, thereby increasing the pressure on existing healthcare resources. Smart and Connected Health (SCH) is a viable solution for the prevalent healthcare challenges. It can reshape the course of healthcare to be more strategic, preventive, and custom-designed, making it more effective with value-added services. This research endeavors to classify state-of-the-art SCH technologies via a thorough literature review and analysis to comprehensively define SCH features and identify the enabling technology-related challenges in SCH adoption.
We also propose an architectural model that captures the technological aspect of the SCH solution, its environment, and its primary involved stakeholders. It serves as a reference model for SCH acceptance and implementation. We reflect on the COVID-19 case study, illustrating how some countries have tackled the pandemic differently in terms of leveraging the power of different SCH technologies, such as big data, cloud computing, Internet of Things, artificial intelligence, robotics, blockchain, and mobile applications. In combating the pandemic, SCH has been used efficiently at different stages such as disease diagnosis, virus detection, individual monitoring, tracking, controlling, and resource allocation. Furthermore, this review highlights the challenges to SCH acceptance, as well as the potential research directions for better patient-centric healthcare.}, } @article {pmid34812390, year = {2021}, author = {Dong, Y and Yao, YD}, title = {IoT Platform for COVID-19 Prevention and Control: A Survey.}, journal = {IEEE access : practical innovations, open solutions}, volume = {9}, number = {}, pages = {49929-49941}, pmid = {34812390}, issn = {2169-3536}, abstract = {As a result of the worldwide transmission of severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), coronavirus disease 2019 (COVID-19) has evolved into an unprecedented pandemic. Currently, with unavailable pharmaceutical treatments and low vaccination rates, this novel coronavirus results in a great impact on public health, human society, and global economy, which is likely to last for many years. One of the lessons learned from the COVID-19 pandemic is that a long-term system with non-pharmaceutical interventions for preventing and controlling new infectious diseases is desirable to be implemented. Internet of things (IoT) platform is preferred to be utilized to achieve this goal, due to its ubiquitous sensing ability and seamless connectivity. IoT technology is changing our lives through smart healthcare, smart home, and smart city, which aims to build a more convenient and intelligent community. This paper presents how the IoT could be incorporated into the epidemic prevention and control system. Specifically, we demonstrate a potential fog-cloud combined IoT platform that can be used in the systematic and intelligent COVID-19 prevention and control, which involves five interventions including COVID-19 Symptom Diagnosis, Quarantine Monitoring, Contact Tracing & Social Distancing, COVID-19 Outbreak Forecasting, and SARS-CoV-2 Mutation Tracking. We investigate and review the state-of-the-art literature on these five interventions to present the capabilities of IoT in countering against the current COVID-19 pandemic or future infectious disease epidemics.}, } @article {pmid34812296, year = {2020}, author = {Wang, Y and Peng, D and Yu, L and Zhang, Y and Yin, J and Zhou, L and Zheng, S and Wang, F and Li, C}, title = {Monitoring Crop Growth During the Period of the Rapid Spread of COVID-19 in China by Remote Sensing.}, journal = {IEEE journal of selected topics in applied earth observations and remote sensing}, volume = {13}, number = {}, pages = {6195-6205}, pmid = {34812296}, issn = {1939-1404}, abstract = {The status of crop growth under the influence of COVID-19 is important information for evaluating the current food security in China.
This article used the Google Earth Engine cloud computing platform to access and analyze Sentinel-2, MODIS, and other multisource remote sensing data from the last five years to monitor the growth of crops in China, especially in Hubei province, during the period of the rapid spread of COVID-19 (i.e., from late January to mid-March 2020), and compared it with the growth over the same period under similar climate conditions in the past four years. We further analyzed the indirect effects of COVID-19 on crop growth. The results showed that: the area of the crops with better growth (51%) was much larger than that with worse growth (22%); the crops with better and worse growth were mainly distributed in the North China Plain (the main planting areas of winter wheat in China) and the South China regions (such as Guangxi, Guangdong province), respectively. Crops with similar growth occupied 27% of the area. In Hubei province, the area of the crops with better growth (61%) was also larger than that with worse growth (27%). It was found that there was no obvious effect from COVID-19 on the overall growth of crops in China during the period from late January to mid-March 2020, and the growth of crops was much better than that during the same period in previous years. The findings in this study are helpful in evaluating the impact of COVID-19 on China's agriculture, can inform relevant agricultural policy formulation, and help to ensure food security.}, } @article {pmid34812221, year = {2022}, author = {Nagajayanthi, B}, title = {Decades of Internet of Things Towards Twenty-first Century: A Research-Based Introspective.}, journal = {Wireless personal communications}, volume = {123}, number = {4}, pages = {3661-3697}, pmid = {34812221}, issn = {0929-6212}, abstract = {The Internet connects people to people, people to machine, and machine to machine for a life of serendipity through a Cloud. The Internet of Things networks objects or people and integrates them with software to collect and exchange data. The Internet of things (IoT) influences our lives based on how we ruminate, respond, and anticipate. IoT 2021 heralds from the fringes to the data ecosystem and panaches a comfort zone. IoT is overwhelmingly embraced by businessmen and consumers due to increased productivity and convenience. The Internet of Things facilitates intelligent device control with cloud vendors like Amazon and Google using artificial intelligence for data analytics, and with digital assistants like Alexa and Siri providing a voice user interface. Smart IoT is all about duplex connecting, processing, and implementing. Centralized IoT architecture is vulnerable to cyber-attacks. With Block Chain, it is possible to maintain transparency and security of the transaction's data. Robotic Process Automation (RPA) using bots has automated laborious tasks in 2019. Embedded Internet using Facial Recognition could reduce the coronavirus pandemic crisis by making a paradigm shift from fingerprint sensors to facial recognition. Security concerns are addressed with micro-segmentation approaches. IoT, an incredible vision of the future, makes systems adaptive with customized features, responsive with increased efficiency, and procurable with optimized cost. This research delivers a comprehensive insight into the technical perspectives of IoT, focusing on interoperability, flexibility, scalability, mobility, security, transparency, standardization, and low energy.
A smart classroom is implemented based on the concepts of IoT.}, } @article {pmid34805975, year = {2021}, author = {Khan, FS and Bao, N}, title = {Quantum Prisoner's Dilemma and High Frequency Trading on the Quantum Cloud.}, journal = {Frontiers in artificial intelligence}, volume = {4}, number = {}, pages = {769392}, pmid = {34805975}, issn = {2624-8212}, abstract = {High-frequency trading (HFT) offers an excellent use case and a potential killer application of the commercially available, first generation quasi-quantum computers. To this end, we offer here a simple game-theoretic model of HFT as the famous two player game, Prisoner's Dilemma. We explore the implementation of HFT as an instance of Prisoner's Dilemma on the (quasi) quantum cloud using the Eisert, Wilkens, and Lewenstein quantum mediated communication protocol, and how this implementation can not only increase transaction speed but also improve the lot of the players in HFT. Using cooperative game-theoretic reasoning, we also note that in the near future when the internet is properly quantum, players will be able to achieve Pareto-optimality in HFT as an instance of reinforced machine learning.}, } @article {pmid34805503, year = {2021}, author = {Farid, M and Latip, R and Hussin, M and Abdul Hamid, NAW}, title = {A fault-intrusion-tolerant system and deadline-aware algorithm for scheduling scientific workflow in the cloud.}, journal = {PeerJ. Computer science}, volume = {7}, number = {}, pages = {e747}, pmid = {34805503}, issn = {2376-5992}, abstract = {BACKGROUND: Recent technological developments have enabled the execution of more scientific solutions on cloud platforms. Cloud-based scientific workflows are subject to various risks, such as security breaches and unauthorized access to resources. By attacking side channels or virtual machines, attackers may destroy servers, causing interruption and delay or incorrect output. Although cloud-based scientific workflows are often used for vital computational-intensive tasks, their failure can come at a great cost.

METHODOLOGY: To increase workflow reliability, we propose the Fault- and Intrusion-Tolerant Workflow Scheduling algorithm (FITSW). The proposed workflow system uses task executors consisting of many virtual machines to carry out workflow tasks. FITSW duplicates each sub-task three times, uses an intermediate-data decision-making mechanism, and then employs a deadline-partitioning method to determine sub-deadlines for each sub-task. In this way, task scheduling adapts dynamically to the available resource flow. The proposed technique generates or recycles task executors, keeps the workflow clean, and improves efficiency. Experiments were conducted on WorkflowSim to evaluate the effectiveness of FITSW using metrics such as task completion rate, success rate and completion time.
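A minimal Python sketch of the two FITSW mechanisms just described, proportional deadline partitioning and triple sub-task replication with a majority vote, may help make the methodology concrete. All names and the toy workload below are illustrative assumptions, not the authors' implementation.

from collections import Counter
import random

def partition_deadline(total_deadline, est_runtimes):
    """Split a workflow deadline into sub-deadlines proportional to each
    sub-task's estimated runtime (one common deadline-partitioning rule)."""
    total = sum(est_runtimes)
    return [total_deadline * t / total for t in est_runtimes]

def run_replicated(task, replicas=3):
    """Run a sub-task on three executors and majority-vote the results,
    masking a single faulty or intruded executor."""
    results = [task() for _ in range(replicas)]
    value, votes = Counter(results).most_common(1)[0]
    return value if votes >= 2 else None  # no quorum: reschedule the sub-task

def noisy_task():
    """Toy sub-task whose executor is occasionally compromised."""
    return 42 if random.random() > 0.2 else -1

print(partition_deadline(100.0, [2.0, 3.0, 5.0]))  # -> [20.0, 30.0, 50.0]
print(run_replicated(noisy_task))                  # -> 42, or None without quorum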

RESULTS: The results show that FITSW not only raises the success rate by about 12%, but also improves the task completion rate by 6.2% and reduces the completion time by about 15.6% in comparison with the intrusion-tolerant scientific workflow (ITSW) system.}, } @article {pmid34804767, year = {2022}, author = {Quy, VK and Hau, NV and Anh, DV and Ngoc, LA}, title = {Smart healthcare IoT applications based on fog computing: architecture, applications and challenges.}, journal = {Complex & intelligent systems}, volume = {8}, number = {5}, pages = {3805-3815}, pmid = {34804767}, issn = {2198-6053}, abstract = {The history of human development has proven that medical and healthcare applications for humanity have always been the main driving force behind the development of science and technology. The advent of cloud technology made it possible, for the first time, to provide systems infrastructure as a service, platform as a service and software as a service. Cloud technology has dominated healthcare information systems for decades now. However, one limitation of cloud-based applications is their high service response time. In some emergency scenarios, the control and monitoring of patient status and decision-making with related resources (such as hospital, ambulance, doctor and medical conditions) must happen within seconds and have a direct impact on the lives of patients. To address these challenges, computing technologies such as cloud computing, edge computing and fog computing have been proposed. In this article, we make a comparison between these computing technologies. Then, we present a common architectural framework based on fog computing for Internet of Health Things (Fog-IoHT) applications. Besides, we also indicate possible applications and challenges in integrating fog computing into IoT healthcare applications. The analysis results indicate that there is huge potential for IoHT applications based on fog computing. We hope this study will be an important guide for the future development of fog-based healthcare IoT applications.}, } @article {pmid34804200, year = {2021}, author = {Hasanin, T and Alsobhi, A and Khadidos, A and Qahmash, A and Khadidos, A and Ogunmola, GA}, title = {Efficient Multiuser Computation for Mobile-Edge Computing in IoT Application Using Optimization Algorithm.}, journal = {Applied bionics and biomechanics}, volume = {2021}, number = {}, pages = {9014559}, pmid = {34804200}, issn = {1176-2322}, abstract = {Mobile edge computing (MEC) is a novel computing paradigm that promises dramatic reductions in latency and energy consumption by offloading computation-intensive tasks to edge clouds in close proximity to smart mobile users. In this research, we reduce offloading latency between edge servers and multiple users in a 5G IoT application environment using the bald eagle search (BES) optimization algorithm. A purely deep learning approach may incur high computational complexity and long run times. In an edge computing system, devices can offload their computation-intensive tasks to the edge servers to save energy and shorten their latency. The BES algorithm is an advanced optimization algorithm that mimics the hunting strategy of eagles through three stages: selecting, searching, and swooping.
Previously, the BES algorithm has been used to optimize energy consumption and distance; in this research, we aim to improve energy efficiency and reduce offloading latency. Delays occur as the number of devices, and hence the demand for cloud data, increases; this can be mitigated by adding resource (ROS) estimation. We therefore enhance the BES algorithm with a ROS estimation stage that selects the better resources, so that the edge system offloads the most appropriate IoT subtasks to edge servers and the expected execution time is minimized. Based on multiuser offloading, the proposed bald eagle search optimization algorithm can effectively reduce the end-to-end time and reach fast, near-optimal offloading decisions for IoT devices. Latency is reduced by moving computation from the cloud to local edge resources, and the deep learning component obtains faster and better results from the network. The proposed BES technique outperforms the conventional methods it is compared against in minimizing offloading latency, and simulations demonstrate its efficiency and stability.}, } @article {pmid34801873, year = {2021}, author = {Retico, A and Avanzo, M and Boccali, T and Bonacorsi, D and Botta, F and Cuttone, G and Martelli, B and Salomoni, D and Spiga, D and Trianni, A and Stasi, M and Iori, M and Talamonti, C}, title = {Enhancing the impact of Artificial Intelligence in Medicine: A joint AIFM-INFN Italian initiative for a dedicated cloud-based computing infrastructure.}, journal = {Physica medica : PM : an international journal devoted to the applications of physics to medicine and biology : official journal of the Italian Association of Biomedical Physics (AIFB)}, volume = {91}, number = {}, pages = {140-150}, doi = {10.1016/j.ejmp.2021.10.005}, pmid = {34801873}, issn = {1724-191X}, mesh = {*Artificial Intelligence ; *Cloud Computing ; Humans ; Italy ; Nuclear Physics ; Precision Medicine ; }, abstract = {Artificial Intelligence (AI) techniques have been implemented in the field of Medical Imaging for more than forty years. Medical Physicists, Clinicians and Computer Scientists have been collaborating since the beginning to realize software solutions to enhance the informative content of medical images, including AI-based support systems for image interpretation. Despite the recent massive progress in this field due to the current emphasis on Radiomics, Machine Learning and Deep Learning, there are still some barriers to overcome before these tools are fully integrated into the clinical workflows to finally enable a precision medicine approach to patients' care. Nowadays, as Medical Imaging has entered the Big Data era, innovative solutions to efficiently deal with huge amounts of data and to exploit large and distributed computing resources are urgently needed. In the framework of a collaboration agreement between the Italian Association of Medical Physicists (AIFM) and the National Institute for Nuclear Physics (INFN), we propose a model of an intensive computing infrastructure, especially suited for training AI models, equipped with secure storage systems, compliant with data protection regulation, which will accelerate the development and extensive validation of AI-based solutions in the Medical Imaging field of research.
This solution can be developed and made operational by Physicists and Computer Scientists working in complementary fields of research in Physics, such as High Energy Physics and Medical Physics, who have all the necessary skills to tailor the AI technology to the needs of the Medical Imaging community and to shorten the pathway towards the clinical applicability of AI-based decision support systems.}, } @article {pmid34798231, year = {2021}, author = {Dong, L and Li, J and Zou, Q and Zhang, Y and Zhao, L and Wen, X and Gong, J and Li, F and Liu, T and Evans, AC and Valdes-Sosa, PA and Yao, D}, title = {WeBrain: A web-based brainformatics platform of computational ecosystem for EEG big data analysis.}, journal = {NeuroImage}, volume = {245}, number = {}, pages = {118713}, doi = {10.1016/j.neuroimage.2021.118713}, pmid = {34798231}, issn = {1095-9572}, mesh = {Big Data ; *Cloud Computing ; *Computational Biology ; *Electroencephalography ; Humans ; Software ; Systems Integration ; }, abstract = {The current evolution of 'cloud neuroscience' leads to more efforts with large-scale EEG applications, using EEG pipelines to handle the rapidly accumulating EEG data. However, there are few cloud platforms that seek to address the cloud computational challenges of EEG big data analysis to benefit the EEG community. In response to these challenges, the WeBrain cloud platform (https://webrain.uestc.edu.cn/) was designed as a web-based brainformatics platform and computational ecosystem to enable large-scale EEG data storage, exploration and analysis using cloud high-performance computing (HPC) facilities. WeBrain connects researchers from different fields to EEG and multimodal tools that have become the norm in the field and the cloud processing power required to handle those large EEG datasets. This platform provides an easy-to-use system for novice users (even those with no computer programming skills) and provides satisfactory maintainability, sustainability and flexibility for IT administrators and tool developers. A range of resources are also available on https://webrain.uestc.edu.cn/, including documents, manuals, example datasets related to WeBrain, and collected links to open EEG datasets and tools. It is not necessary for users or administrators to install any software or system, and all that is needed is a modern web browser, which reduces the technical expertise required to use or manage WeBrain. The WeBrain platform is sponsored and driven by the China-Canada-Cuba international brain cooperation project (CCC-Axis, http://ccc-axis.org/), and we hope that WeBrain will be a promising cloud brainformatics platform for exploring brain information in large-scale EEG applications in the EEG community.}, } @article {pmid34777973, year = {2021}, author = {Moursi, AS and El-Fishawy, N and Djahel, S and Shouman, MA}, title = {An IoT enabled system for enhanced air quality monitoring and prediction on the edge.}, journal = {Complex & intelligent systems}, volume = {7}, number = {6}, pages = {2923-2947}, pmid = {34777973}, issn = {2198-6053}, abstract = {Air pollution is a major issue resulting from the excessive use of conventional energy sources in developing countries and worldwide. Particulate Matter less than 2.5 µm in diameter (PM2.5) is the most dangerous air pollutant, invading the human respiratory system and causing lung and heart diseases. Therefore, innovative air pollution forecasting methods and systems are required to reduce such risk.
To that end, this paper proposes an Internet of Things (IoT) enabled system for monitoring and predicting PM2.5 concentration on both edge devices and the cloud. This system employs a hybrid prediction architecture in which several Machine Learning (ML) algorithms are hosted within a Nonlinear AutoRegression with eXogenous input (NARX) framework. It uses the past 24 h of PM2.5, cumulated wind speed and cumulated rain hours to predict the next hour of PM2.5. This system was tested on a PC to evaluate cloud prediction and on a Raspberry Pi to evaluate edge device prediction. Such a system is essential for responding quickly to air pollution in remote areas with low bandwidth or no internet connection. The performance of our system was assessed using Root Mean Square Error (RMSE), Normalized Root Mean Square Error (NRMSE), the coefficient of determination (R²), Index of Agreement (IA), and duration in seconds. The obtained results highlighted that NARX/LSTM achieved the highest R² and IA and the lowest RMSE and NRMSE, outperforming other previously proposed deep learning hybrid algorithms. In contrast, NARX/XGBRF achieved the best balance between accuracy and speed on the Raspberry Pi.}, } @article {pmid34777904, year = {2021}, author = {Zhang, M and Dai, D and Hou, S and Liu, W and Gao, F and Xu, D and Hu, Y}, title = {Thinking on the informatization development of China's healthcare system in the post-COVID-19 era.}, journal = {Intelligent medicine}, volume = {1}, number = {1}, pages = {24-28}, pmid = {34777904}, issn = {2667-1026}, abstract = {With the application of Internet of Things, big data, cloud computing, artificial intelligence, and other cutting-edge technologies, China's medical informatization is developing rapidly. In this paper, we summarize the role of information technology in the healthcare sector's battle against the coronavirus disease 2019 (COVID-19) from the perspectives of early warning and monitoring, screening and diagnosis, and medical treatment and scientific research, analyze the bottlenecks in the development of information technology in the post-COVID-19 era, and put forward feasible suggestions for further promoting the construction of medical informatization from the perspectives of sharing, convenience, and safety.}, } @article {pmid34777850, year = {2021}, author = {Alipour, J and Mehdipour, Y and Karimi, A and Sharifian, R}, title = {Affecting factors of cloud computing adoption in public hospitals affiliated with Zahedan University of Medical Sciences: A cross-sectional study in the Southeast of Iran.}, journal = {Digital health}, volume = {7}, number = {}, pages = {20552076211033428}, pmid = {34777850}, issn = {2055-2076}, abstract = {OBJECTIVE: Health care organizations require cloud computing to remain efficient and cost-effective, and to provide high-quality health care services. Adoption of this technology by users plays a critical role in the success of its application. This study aimed to determine factors affecting cloud computing adoption in public hospitals affiliated with Zahedan University of Medical Sciences.
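The lagged-input construction used by the NARX-based air-quality system above (pmid34777973) can be sketched briefly in Python; the synthetic data and the linear stand-in model below are assumptions for illustration, not the authors' NARX/LSTM pipeline.

import numpy as np
from sklearn.linear_model import LinearRegression

def make_narx_features(pm25, wind_cum, rain_cum, lags=24):
    """Build feature rows from the past `lags` hours of PM2.5 plus the current
    cumulated wind speed and rain hours; the target is PM2.5 one hour ahead."""
    X, y = [], []
    for t in range(lags, len(pm25) - 1):
        X.append(np.concatenate([pm25[t - lags:t], [wind_cum[t], rain_cum[t]]]))
        y.append(pm25[t + 1])
    return np.array(X), np.array(y)

rng = np.random.default_rng(0)
n = 500
pm25 = rng.gamma(2.0, 30.0, n)         # synthetic hourly PM2.5 series
wind = np.cumsum(rng.random(n))        # cumulated wind speed
rain = np.cumsum(rng.random(n) < 0.1)  # cumulated rain hours

X, y = make_narx_features(pm25, wind, rain)
model = LinearRegression().fit(X, y)   # stand-in for the NARX-hosted ML models
print("in-sample R^2:", model.score(X, y))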

METHODS: A cross-sectional descriptive and analytic study was performed in 2017. The study population comprised information technology and hospital information system authorities and hospital information system users. The sample consisted of 573 participants. The data were collected using a questionnaire and analyzed with the Statistical Package for Social Sciences software using descriptive and analytical statistics.

RESULTS: The mean scores of the environmental, human, organizational, technological, and intention dimensions of cloud computing adoption were 3.39 ± 0.81, 3.27 ± 0.63, 3.19 ± 0.71, 3 ± 0.43, and 3.55 ± 1.10, respectively. Furthermore, a significant positive relationship was found between the intention to adopt cloud computing and the environmental (R = 0.521, p < 0.001), organizational (R = 0.426, p < 0.001), human (R = 0.492, p < 0.001), and technological (R = 0.157, p < 0.001) dimensions.

CONCLUSIONS: Benefits of cloud computing adoption, relative advantage, and competitive pressure were identified as the most influential factors in accepting cloud computing. Simplifying users' understanding of this technology and its application, improving staff's technical capabilities, promoting executive managers' understanding of the nature and functions of cloud computing, and providing full support and stronger governmental mandates for the adoption of new technologies are necessary to facilitate the adoption of cloud computing in the given hospitals.}, } @article {pmid34776626, year = {2022}, author = {Durai, CAD and Begum, A and Jebaseeli, J and Sabahath, A}, title = {COVID-19 pandemic, predictions and control in Saudi Arabia using SIR-F and age-structured SEIR model.}, journal = {The Journal of supercomputing}, volume = {78}, number = {5}, pages = {7341-7353}, pmid = {34776626}, issn = {0920-8542}, abstract = {COVID-19 has affected every individual physically or psychologically, leading to substantial impacts on how they perceive and respond to the pandemic's danger. Due to the lack of vaccines or effective medicines to cure the infection, urgent control measures are required to prevent the continued spread of COVID-19. This can be achieved using advanced computing, such as artificial intelligence (AI), machine learning (ML), deep learning (DL), cloud computing, and edge computing. To control the exponential spread of the novel virus, it is crucial for countries to apply containment and mitigation interventions. To prevent exponential growth, several control measures have been applied in the Kingdom of Saudi Arabia to mitigate the COVID-19 epidemic. As the pandemic has been spreading globally for more than a year, an ample amount of data is available for researchers to predict and forecast the effect of the pandemic in the near future. This article interprets the effects of COVID-19 using the Susceptible-Infected-Recovered (SIR-F) model, where F stands for 'Fatal with confirmation,' an age-structured SEIR (Susceptible-Exposed-Infectious-Removed) model, and machine learning, for smart health care and the well-being of the citizens of Saudi Arabia. Additionally, it examines the different control measure scenarios produced by the modified SEIR model. The evolution of the simulation results shows that the interventions are vital to flattening the virus spread curve, as they can delay the peak and decrease the fatality rate.}, } @article {pmid36654109, year = {2021}, author = {Xu, X and Sun, J and Endo, S and Li, Y and Benjamin, SC and Yuan, X}, title = {Variational algorithms for linear algebra.}, journal = {Science bulletin}, volume = {66}, number = {21}, pages = {2181-2188}, doi = {10.1016/j.scib.2021.06.023}, pmid = {36654109}, issn = {2095-9281}, abstract = {Quantum algorithms have been developed for efficiently solving linear algebra tasks. However, they generally require deep circuits and hence universal fault-tolerant quantum computers. In this work, we propose variational algorithms for linear algebra tasks that are compatible with noisy intermediate-scale quantum devices. We show that the solutions of linear systems of equations and matrix-vector multiplications can be translated as the ground states of the constructed Hamiltonians. Based on the variational quantum algorithms, we introduce Hamiltonian morphing together with an adaptive ansatz for efficiently finding the ground state, and show the solution verification.
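The translation just described can be written out in its generic textbook form, assumed here rather than quoted from the paper (pmid36654109): for a linear system A x = b with normalized \(\lvert b\rangle\), define

\[
  H \;=\; A^{\dagger}\,\bigl(I - \lvert b\rangle\langle b\rvert\bigr)\,A ,
\]

which is positive semidefinite and satisfies \(H\lvert x\rangle = 0\) exactly when \(A\lvert x\rangle \propto \lvert b\rangle\); minimizing the energy \(\langle\psi(\theta)\rvert H \lvert\psi(\theta)\rangle\) over a variational ansatz therefore drives the state to a zero-energy ground state encoding the normalized solution.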
Our algorithms are especially suitable for linear algebra problems with sparse matrices, and have wide applications in machine learning and optimisation problems. The algorithm for matrix multiplications can also be used for Hamiltonian simulation and open system simulation. We evaluate the cost and effectiveness of our algorithm through numerical simulations for solving linear systems of equations. We implement the algorithm on the IBM quantum cloud device with a high solution fidelity of 99.95%.}, } @article {pmid34770615, year = {2021}, author = {Ala'anzy, MA and Othman, M and Hanapi, ZM and Alrshah, MA}, title = {Locust Inspired Algorithm for Cloudlet Scheduling in Cloud Computing Environments.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {21}, pages = {}, pmid = {34770615}, issn = {1424-8220}, mesh = {Algorithms ; Animals ; *Cloud Computing ; Computers ; *Grasshoppers ; Heuristics ; }, abstract = {Cloud computing is an emerging paradigm that offers flexible and seamless services for users based on their needs, including user budget savings. However, the involvement of a vast number of cloud users has made the scheduling of users' tasks (i.e., cloudlets) a challenging issue in selecting suitable data centres, servers (hosts), and virtual machines (VMs). Cloudlet scheduling is an NP-complete problem that can be solved using various meta-heuristic algorithms, which are quite popular due to their effectiveness. Massive user tasks and rapid growth in cloud resources have become increasingly complex challenges; therefore, an efficient algorithm is necessary for allocating cloudlets efficiently to attain better execution times, resource utilisation, and waiting times. This paper proposes a locust-inspired cloudlet scheduling algorithm to reduce the average makespan and waiting time and to boost VM and server utilisation. The CloudSim toolkit was used to evaluate our algorithm's efficiency, and the obtained results revealed that our algorithm outperforms other state-of-the-art nature-inspired algorithms, improving the average makespan, waiting time, and resource utilisation.}, } @article {pmid34770606, year = {2021}, author = {Singamaneni, KK and Ramana, K and Dhiman, G and Singh, S and Yoon, B}, title = {A Novel Blockchain and Bi-Linear Polynomial-Based QCP-ABE Framework for Privacy and Security over the Complex Cloud Data.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {21}, pages = {}, pmid = {34770606}, issn = {1424-8220}, support = {2019R1A2C1085388//National Research Foundation of Korea/ ; S-2020-G0001-00050.//Dongguk University Research Fund/ ; }, mesh = {Algorithms ; *Blockchain ; Cloud Computing ; Computer Security ; Privacy ; }, abstract = {Because of the limited resources available in local IoT devices, the large-scale cloud consumer data produced by IoT-related machines are contracted out to the cloud. Cloud computing is unreliable; using it can compromise user privacy, and data may be leaked. Because cloud data and grid infrastructure are both growing exponentially, there is an urgent need to explore computational resources and the protection of large cloud data. Numerous cloud service categories are assimilated into numerous fields, such as defense systems and pharmaceutical databases, to compute information space and allocate resources. Attribute-Based Encryption (ABE) is a sophisticated approach that can permit employees to specify a higher level of security for data stored in cloud storage facilities.
Numerous legacy ABE techniques are practical only when applied to small data sets, generating cryptograms with restricted computational properties; these properties are used to generate the key, encrypt, and decrypt. To address the current concerns, a dynamic non-linear polynomial chaotic quantum hash technique on top of a secure blockchain model can be used to enhance cloud data security while maintaining user privacy. In the proposed method, customer attributes are protected by using a dynamic non-polynomial chaotic map function for key initialization, encryption, and decryption. In the proposed model, both organized and unorganized massive clinical data are considered as inputs for reliable corroboration and encoding. Compared to existing models, real-time simulation results demonstrate that the proposed standard achieves more than 90% precision in terms of bit change and more than 95% in terms of dynamic key generation, encipherment, and decipherment time.}, } @article {pmid34770582, year = {2021}, author = {Roig, PJ and Alcaraz, S and Gilly, K and Bernad, C and Juiz, C}, title = {Modeling of a Generic Edge Computing Application Design.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {21}, pages = {}, pmid = {34770582}, issn = {1424-8220}, mesh = {*Cloud Computing ; }, abstract = {Edge computing applications leverage advances in edge computing along with the latest trends in convolutional neural networks in order to achieve the ultra-low latency, high-speed processing, low-power-consumption scenarios that are necessary for deploying real-time Internet of Things applications efficiently. As the importance of such scenarios is growing by the day, we propose two different kinds of models: an algebraic model, written in a process algebra called ACP, and a coding model, written in a modeling language called Promela. Both approaches have been used to build models of an edge infrastructure with a cloud backup, which has been further extended with the addition of extra fog nodes, and after applying the proper verification techniques, they have all been duly verified. Specifically, a generic edge computing design has been specified algebraically with ACP, followed by its corresponding algebraic verification, and it has also been specified by means of Promela code, which has been verified with the model checker Spin.}, } @article {pmid34770545, year = {2021}, author = {Ahmad, Z and Jehangiri, AI and Ala'anzy, MA and Othman, M and Umar, AI}, title = {Fault-Tolerant and Data-Intensive Resource Scheduling and Management for Scientific Applications in Cloud Computing.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {21}, pages = {}, pmid = {34770545}, issn = {1424-8220}, mesh = {*Algorithms ; *Cloud Computing ; Computer Simulation ; Heuristics ; Workflow ; }, abstract = {Cloud computing is a fully fledged, matured and flexible computing paradigm that provides services to scientific and business applications in a subscription-based environment. Scientific applications such as Montage and CyberShake are organized scientific workflows with data- and compute-intensive tasks that also have some special characteristics. These characteristics include tasks that are executed in terms of integration, disintegration, pipeline, and parallelism, and thus require special attention to task management and data-oriented resource scheduling and management.
Tasks executed in a pipeline are considered bottleneck executions: their failure renders the whole execution futile, which requires fault-tolerance-aware execution. Tasks executed in parallel require similar instances of cloud resources, and thus cluster-based execution may improve system performance in terms of makespan and execution cost. Therefore, this research work presents a cluster-based, fault-tolerant and data-intensive (CFD) scheduling strategy for scientific applications in cloud environments. The CFD strategy addresses the data intensiveness of scientific workflow tasks with cluster-based, fault-tolerant mechanisms. The Montage scientific workflow was used for simulation, and the results of the CFD strategy were compared with three well-known heuristic scheduling policies: (a) MCT, (b) Max-min, and (c) Min-min. The simulation results showed that the CFD strategy reduced the makespan by 14.28%, 20.37%, and 11.77%, respectively, compared with the three existing policies. Similarly, CFD reduced the execution cost by 1.27%, 5.3%, and 2.21%, respectively, compared with the three existing policies. With the CFD strategy, the SLA is not violated with regard to time and cost constraints, whereas the existing policies violate it numerous times.}, } @article {pmid34770533, year = {2021}, author = {da Costa Bezerra, SF and Filho, ASM and Delicato, FC and da Rocha, AR}, title = {Processing Complex Events in Fog-Based Internet of Things Systems for Smart Agriculture.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {21}, pages = {}, pmid = {34770533}, issn = {1424-8220}, mesh = {Agriculture ; Cloud Computing ; *Internet of Things ; }, abstract = {The recent growth of the Internet of Things' services and applications has increased data processing and storage requirements. The Edge computing concept aims to leverage the processing capabilities of the IoT and other devices placed at the edge of the network. One embodiment of this paradigm is Fog computing, which provides an intermediate and often hierarchical processing tier between the data sources and the remote Cloud. Among the major benefits of this concept, the end-to-end latency can be decreased, thus favoring time-sensitive applications. Moreover, the data traffic at the network core and the Cloud computing workload can be reduced. Combining the Fog computing paradigm with Complex Event Processing (CEP) and data fusion techniques has excellent potential for generating valuable knowledge and aiding decision-making processes in the Internet of Things' systems. In this context, we propose a multi-tier complex event processing approach (sensor node, Fog, and Cloud) that promotes fast decision making and is based on information with 98% accuracy. The experiments show a reduction of 77% in the average time of sending messages in the network. In addition, we achieved a reduction of 82% in data traffic.}, } @article {pmid34770496, year = {2021}, author = {Matesanz, P and Graen, T and Fiege, A and Nolting, M and Nejdl, W}, title = {Demand-Driven Data Acquisition for Large Scale Fleets.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {21}, pages = {}, pmid = {34770496}, issn = {1424-8220}, support = {01 MD 19007A//Federal Ministry for Economic Affairs and Energy/ ; }, mesh = {Humans ; *Software ; }, abstract = {Automakers manage vast fleets of connected vehicles and face an ever-increasing demand for their sensor readings.
This demand originates from many stakeholders, each potentially requiring different sensors from different vehicles. Currently, this demand remains largely unfulfilled due to a lack of systems that can handle such diverse demands efficiently. Vehicles are usually passive participants in data acquisition, each continuously reading and transmitting the same static set of sensors. However, in a multi-tenant setup with diverse data demands, each vehicle potentially needs to provide different data instead. We present a system that performs such vehicle-specific minimization of data acquisition by mapping individual data demands to individual vehicles. We collect personal data only after prior consent and fulfill the requirements of the GDPR. Non-personal data can be collected by directly addressing individual vehicles. The system consists of a software component natively integrated with a major automaker's vehicle platform and a cloud platform brokering access to acquired data. Sensor readings are either provided via near real-time streaming or as recorded trip files that provide specific consistency guarantees. A performance evaluation with over 200,000 simulated vehicles has shown that our system can increase server capacity on-demand and process streaming data within 269 ms on average during peak load. The resulting architecture can be used by other automakers or operators of large sensor networks. Native vehicle integration is not mandatory; the architecture can also be used with retrofitted hardware such as OBD readers.}, } @article {pmid34770413, year = {2021}, author = {Calvo, I and Villar, E and Napole, C and Fernández, A and Barambones, O and Gil-García, JM}, title = {Reliable Control Applications with Wireless Communication Technologies: Application to Robotic Systems.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {21}, pages = {}, pmid = {34770413}, issn = {1424-8220}, support = {EKOHEGAZ (ELKARTEK KK-2021/00092)//Eusko Jaurlaritza/ ; GIU20/063//Euskal Herriko Unibertsitatea (UPV/EHU)/ ; CONAVANTER//Diputación Foral de Alava (DFA)/ ; }, mesh = {Communication ; Fuzzy Logic ; Reproducibility of Results ; *Robotic Surgical Procedures ; Wireless Technology ; }, abstract = {The nature of wireless propagation may reduce the QoS of applications, such that some packets can be delayed or lost. For this reason, the design of wireless control applications must be approached in a holistic way to avoid degrading the performance of the control algorithms. This paper is aimed at improving the reliability of wireless control applications in the event of communication degradation or temporary loss at the wireless links. Two controller levels are used: sophisticated algorithms providing better performance are executed in a central node, whereas local independent controllers, implemented as back-up controllers, are executed next to the process in case of QoS degradation. This work presents a reliable strategy for switching between central and local controllers, preventing plants from becoming uncontrolled. For validation purposes, the presented approach was used to control a planar robot. A Fuzzy Logic control algorithm was implemented as the main controller on a high-performance computing platform. A back-up controller was implemented on an edge device. This approach prevents the robot from becoming uncontrolled in case of communication failure. Although a planar robot was chosen in this work, the presented approach may be extended to other processes.
XBee 900 MHz communication technology was selected for control tasks, leaving the 2.4 GHz band for integration with cloud services. Several experiments are presented to analyze the behavior of the control application under different circumstances. The results proved that our approach allows the use of wireless communications, even in critical control applications.}, } @article {pmid34770308, year = {2021}, author = {Simić, M and Sladić, G and Zarić, M and Markoski, B}, title = {Infrastructure as Software in Micro Clouds at the Edge.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {21}, pages = {}, pmid = {34770308}, issn = {1424-8220}, abstract = {Edge computing offers cloud services closer to data sources and end-users, laying the foundation for novel applications. The infrastructure deployment is taking off, bringing new challenges: how to use geo-distribution properly, or harness the advantages of having resources at a specific location? New real-time applications require multi-tier infrastructure, preferably doing data preprocessing locally, but using the cloud for heavy workloads. We present a model able to organize geo-distributed nodes into micro clouds dynamically, allowing resource reorganization to best serve population needs. Such elasticity is achieved by relying on cloud organization principles, adapted for a different environment. The desired state is specified descriptively, and the system handles the rest. As such, infrastructure is abstracted to the software level, thus enabling "infrastructure as software" at the edge. We argue for blending the proposed model into existing tools, allowing cloud providers to offer future micro clouds as a service.}, } @article {pmid34770286, year = {2021}, author = {Kil, BH and Park, JS and Ryu, MH and Park, CY and Kim, YS and Kim, JD}, title = {Cloud-Based Software Architecture for Fully Automated Point-of-Care Molecular Diagnostic Device.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {21}, pages = {}, pmid = {34770286}, issn = {1424-8220}, support = {HW20C2053//Korea Health Industry Development Institute/Republic of Korea ; }, mesh = {*Cloud Computing ; Computers ; Pathology, Molecular ; *Point-of-Care Systems ; Software ; }, abstract = {This paper proposes a cloud-based software architecture for fully automated point-of-care molecular diagnostic devices. The target system operates a cartridge consisting of an extraction body for DNA extraction and a PCR chip for amplification and fluorescence detection. To facilitate control and monitoring via the cloud, a socket server was employed for fundamental molecular diagnostic functions such as DNA extraction, amplification, and fluorescence detection. The user interface for experimental control and monitoring was constructed with a RESTful application programming interface, allowing access from the terminal device, edge, and cloud. Furthermore, it can also be accessed through any web-based user interface on smart computing devices such as smart phones or tablets.
An emulator with the proposed software architecture was fabricated to validate successful operation.}, } @article {pmid34770256, year = {2021}, author = {Aleisa, MA and Abuhussein, A and Alsubaei, FS and Sheldon, FT}, title = {Examining the Performance of Fog-Aided, Cloud-Centered IoT in a Real-World Environment.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {21}, pages = {}, pmid = {34770256}, issn = {1424-8220}, mesh = {*Cloud Computing ; }, abstract = {The fog layer provides substantial benefits in cloud-based IoT applications because it can serve as an aggregation layer and it moves the computation resources nearer to the IoT devices; however, it is important to ensure adequate performance is achieved in such applications, as the devices usually communicate frequently and authenticate with the cloud. This can cause performance and availability issues, which can be dangerous in critical applications such as in the healthcare sector. In this paper, we analyze the efficacy of the fog layer in different architectures in a real-world environment by examining performance metrics for the cloud and fog layers using different numbers of IoT devices. We also implement the fog layer using two methods to determine whether different fog implementation frameworks can affect the performance. The results show that including a fog layer with semi-heavyweight computation capability results in higher capital costs, although in the long run resources, time, and money are saved. This study can serve as a reference for fundamental fog computing concepts. It can also be used to walk practitioners through different implementation frameworks of fog-aided IoT and to show tradeoffs in order to inform when to use each implementation framework based on one's objectives.}, } @article {pmid34766274, year = {2022}, author = {Lacar, B}, title = {Generation of Centered Log-Ratio Normalized Antibody-Derived Tag Counts from Large Single-Cell Sequencing Datasets.}, journal = {Methods in molecular biology (Clifton, N.J.)}, volume = {2386}, number = {}, pages = {203-217}, pmid = {34766274}, issn = {1940-6029}, mesh = {Antibodies ; Gene Expression Profiling ; High-Throughput Nucleotide Sequencing ; Sequence Analysis, RNA ; *Single-Cell Analysis ; }, abstract = {Recent developments in single-cell analysis have provided the ability to assay >50 surface-level proteins by combining oligo-conjugated antibodies with sequencing technology. These methods, such as CITE-seq and REAP-seq, have added another modality to single-cell analysis, enhancing insight across many biological subdisciplines. While packages like Seurat have greatly facilitated analysis of single-cell protein expression, the practical steps to carry out the analysis with increasingly larger datasets have been fragmented. In addition, using data visualizations, I will highlight some details about the centered log-ratio (CLR) normalization of antibody-derived tag (ADT) counts that may be overlooked.
In this method chapter, I provide detailed steps to generate CLR-normalized CITE-seq data from a large CITE-seq dataset using cloud computing.}, } @article {pmid34760334, year = {2021}, author = {Bhawsar, PMS and Abubakar, M and Schmidt, MK and Camp, NJ and Cessna, MH and Duggan, MA and García-Closas, M and Almeida, JS}, title = {Browser-based Data Annotation, Active Learning, and Real-Time Distribution of Artificial Intelligence Models: From Tumor Tissue Microarrays to COVID-19 Radiology.}, journal = {Journal of pathology informatics}, volume = {12}, number = {}, pages = {38}, pmid = {34760334}, issn = {2229-5089}, abstract = {BACKGROUND: Artificial intelligence (AI) is fast becoming the tool of choice for scalable and reliable analysis of medical images. However, constraints in sharing medical data outside the institutional or geographical space, as well as difficulties in getting AI models and modeling platforms to work across different environments, have led to a "reproducibility crisis" in digital medicine.
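A minimal Python sketch of the per-cell centered log-ratio transform discussed in the CITE-seq entry above (pmid34766274): CLR(x)_i = log(x_i + c) - mean_j log(x_j + c). The pseudocount c and the cells-by-antibodies orientation are common conventions assumed here, not necessarily the chapter's exact choices.

import numpy as np

def clr_normalize(counts, pseudocount=1.0):
    """CLR-normalize an ADT count matrix of shape (cells, antibodies):
    log-transform with a pseudocount, then center each cell's log values."""
    logx = np.log(counts + pseudocount)
    return logx - logx.mean(axis=1, keepdims=True)

adt = np.array([[50.0, 3.0, 0.0], [7.0, 120.0, 2.0]])  # toy ADT counts
print(clr_normalize(adt))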

METHODS: This study details the implementation of a web platform that can be used to mitigate these challenges by orchestrating a digital pathology AI pipeline, from raw data to model inference, entirely on the local machine. We discuss how this federated platform provides governed access to data by consuming the Application Programming Interfaces (APIs) exposed by cloud storage services, allows the addition of user-defined annotations, facilitates active learning for training models iteratively, and provides model inference computed directly in the web browser at practically zero cost. The latter is of particular relevance to clinical workflows because the code, including the AI model, travels to the user's data, which stays private to the governance domain where it was acquired.

RESULTS: We demonstrate that the web browser can be a means of democratizing AI and advancing data socialization in medical imaging backed by consumer-facing cloud infrastructure such as Box.com. As a case study, we test the accompanying platform end-to-end on a large dataset of digital breast cancer tissue microarray core images. We also showcase how it can be applied in contexts separate from digital pathology by applying it to a radiology dataset containing COVID-19 computed tomography images.

CONCLUSIONS: The platform described in this report resolves the challenges to the findable, accessible, interoperable, reusable stewardship of data and AI models by integrating with cloud storage to maintain user-centric governance over the data. It also enables distributed, federated computation for AI inference over those data and proves the viability of client-side AI in medical imaging.

AVAILABILITY: The open-source application is publicly available at , with a short video demonstration at .}, } @article {pmid34750391, year = {2021}, author = {Chatenoux, B and Richard, JP and Small, D and Roeoesli, C and Wingate, V and Poussin, C and Rodila, D and Peduzzi, P and Steinmeier, C and Ginzler, C and Psomas, A and Schaepman, ME and Giuliani, G}, title = {The Swiss data cube, analysis ready data archive using earth observations of Switzerland.}, journal = {Scientific data}, volume = {8}, number = {1}, pages = {295}, pmid = {34750391}, issn = {2052-4463}, abstract = {Since the opening of Earth Observation (EO) archives (USGS/NASA Landsat and EC/ESA Sentinels), large collections of EO data have been freely available, offering scientists new possibilities to better understand and quantify environmental changes. Fully exploiting these satellite EO data will require new approaches for their acquisition, management, distribution, and analysis. Given rapid environmental changes and the emergence of big data, innovative solutions are needed to support policy frameworks and related actions toward sustainable development. Here we present the Swiss Data Cube (SDC), unleashing the information power of Big Earth Data for monitoring the environment, providing Analysis Ready Data over the geographic extent of Switzerland since 1984, updated on a daily basis. Based on a cloud-computing platform allowing users to access, visualize and analyse optical (Sentinel-2; Landsat 5, 7, 8) and radar (Sentinel-1) imagery, the SDC minimizes the time and knowledge required for environmental analyses by offering consistent, calibrated and spatially co-registered satellite observations. SDC-derived analysis-ready data support the generation of environmental information, allowing a variety of environmental policies to be informed with unprecedented timeliness and quality.}, } @article {pmid34749633, year = {2021}, author = {Tangaro, MA and Mandreoli, P and Chiara, M and Donvito, G and Antonacci, M and Parisi, A and Bianco, A and Romano, A and Bianchi, DM and Cangelosi, D and Uva, P and Molineris, I and Nosi, V and Calogero, RA and Alessandri, L and Pedrini, E and Mordenti, M and Bonetti, E and Sangiorgi, L and Pesole, G and Zambelli, F}, title = {Laniakea@ReCaS: exploring the potential of customisable Galaxy on-demand instances as a cloud-based service.}, journal = {BMC bioinformatics}, volume = {22}, number = {Suppl 15}, pages = {544}, pmid = {34749633}, issn = {1471-2105}, support = {653549//Horizon 2020 Framework Programme/ ; 857650//Horizon 2020 Framework Programme/ ; 824087//Horizon 2020 Framework Programme/ ; }, mesh = {*COVID-19 ; *Cloud Computing ; Computational Biology ; Humans ; SARS-CoV-2 ; Software ; }, abstract = {BACKGROUND: Improving the availability and usability of data and analytical tools is a critical precondition for further advancing modern biological and biomedical research. For instance, one of the many ramifications of the COVID-19 global pandemic has been to make even more evident the importance of having bioinformatics tools and data readily actionable by researchers through convenient access points and supported by adequate IT infrastructures. One of the most successful efforts in improving the availability and usability of bioinformatics tools and data is represented by the Galaxy workflow manager and its thriving community. In 2020 we introduced Laniakea, a software platform conceived to streamline the configuration and deployment of "on-demand" Galaxy instances over the cloud.
By facilitating the set-up and configuration of Galaxy web servers, Laniakea provides researchers with a powerful and highly customisable platform for executing complex bioinformatics analyses. The system can be accessed through a dedicated and user-friendly web interface that allows the Galaxy web server's initial configuration and deployment.

RESULTS: "Laniakea@ReCaS", the first instance of a Laniakea-based service, is managed by ELIXIR-IT and was officially launched in February 2020, after about one year of development and testing that involved several users. Researchers can request access to Laniakea@ReCaS through an open-ended call for use-cases. Ten project proposals have been accepted since then, totalling 18 Galaxy on-demand virtual servers that employ ~ 100 CPUs, ~ 250 GB of RAM and ~ 5 TB of storage and serve several different communities and purposes. Herein, we present eight use cases demonstrating the versatility of the platform.

CONCLUSIONS: During this first year of activity, the Laniakea-based service emerged as a flexible platform that facilitated the rapid development of bioinformatics tools, the efficient delivery of training activities, and the provision of public bioinformatics services in different settings, including food safety and clinical research. Laniakea@ReCaS provides a proof of concept of how enabling access to appropriate, reliable IT resources and ready-to-use bioinformatics tools can considerably streamline researchers' work.}, } @article {pmid34746087, year = {2021}, author = {Shah, A and Ahirrao, S and Pandya, S and Kotecha, K and Rathod, S}, title = {Smart Cardiac Framework for an Early Detection of Cardiac Arrest Condition and Risk.}, journal = {Frontiers in public health}, volume = {9}, number = {}, pages = {762303}, pmid = {34746087}, issn = {2296-2565}, mesh = {Bayes Theorem ; *Heart Arrest/diagnosis ; Humans ; *Machine Learning ; Neural Networks, Computer ; Support Vector Machine ; }, abstract = {Cardiovascular disease (CVD) is considered to be one of the most widespread diseases in the world today. Predicting CVD events, such as cardiac arrest, is a difficult task in the area of healthcare. The healthcare industry has a vast collection of datasets for analysis and prediction purposes. However, the predictions made on these publicly available datasets may be erroneous. To make predictions accurate, real-time data need to be collected. This study collected real-time data using sensors and stored it on a cloud computing platform, Google Firebase. The acquired data were then classified using six machine-learning algorithms: Artificial Neural Network (ANN), Random Forest Classifier (RFC), Extreme Gradient Boosting (XGBoost) classifier, Support Vector Machine (SVM), Naïve Bayes (NB), and Decision Tree (DT). Furthermore, we present two novel risk classification approaches, gender-based and age-wise, in this study. The presented approaches use Kaplan-Meier and Cox regression survival analysis methodologies for risk detection and classification. They also assist health experts in identifying the risk probability and predicting the 10-year risk score. The proposed system is an economical alternative to existing systems due to its low cost. The outcome obtained shows an enhanced level of performance, with an overall accuracy of 98% using DT on our collected dataset for cardiac risk prediction. We also introduced two risk classification models for gender- and age-wise groups to detect their survival probability.
The outcomes of the proposed models show accurate probabilities in both classes.}, } @article {pmid34745491, year = {2021}, author = {Fu, X and Wang, Y and Belkacem, AN and Zhang, Q and Xie, C and Cao, Y and Cheng, H and Chen, S}, title = {Integrating Optimized Multiscale Entropy Model with Machine Learning for the Localization of Epileptogenic Hemisphere in Temporal Lobe Epilepsy Using Resting-State fMRI.}, journal = {Journal of healthcare engineering}, volume = {2021}, number = {}, pages = {1834123}, pmid = {34745491}, issn = {2040-2309}, mesh = {Brain ; Entropy ; *Epilepsy, Temporal Lobe/diagnostic imaging ; Functional Laterality ; Humans ; Machine Learning ; Magnetic Resonance Imaging ; }, abstract = {The bottleneck associated with validating the parameters of the entropy model has limited the application of this model to modern functional imaging technologies such as resting-state functional magnetic resonance imaging (rfMRI). In this study, an optimization algorithm that can choose the parameters of the multiscale entropy (MSE) model was developed, and the effectiveness of the optimization for localizing the epileptogenic hemisphere was validated through the classification rate obtained with a supervised machine learning method. The rfMRI data of 20 mesial temporal lobe epilepsy patients with positive indicators (the clinical indicators of the epileptogenic hemisphere) in the hippocampal formation on either the left or right hemisphere (equally divided into two groups) on structural MRI were collected and preprocessed. Then, three parameters in the MSE model were statistically optimized by both the receiver operating characteristic (ROC) curve and the area under the ROC curve value in the sensitivity analysis, and the intergroup significance of optimized entropy values was utilized to confirm the biomarked brain areas sensitive to the epileptogenic hemisphere. Finally, the optimized entropy values of these biomarked brain areas were used as the feature vectors input to a support vector machine to classify the epileptogenic hemisphere, and the classification effectiveness was cross-validated. Nine biomarked brain areas were confirmed by the optimized entropy values, including the medial superior frontal gyrus and the superior parietal gyrus (p < .01). The mean classification accuracy was greater than 90%. It can be concluded that the combination of the optimized MSE model with a machine learning model can accurately confirm the epileptogenic hemisphere from rfMRI. With the powerful information interaction capabilities of 5G communication, the computationally demanding epileptogenic-hemisphere localization algorithm can be integrated into a cloud platform; the demand side only needs to upload patient data to the service platform to realize the preoperative assessment of epilepsy.}, } @article {pmid34744856, year = {2021}, author = {Smart, PR}, title = {Shedding Light on the Extended Mind: HoloLens, Holograms, and Internet-Extended Knowledge.}, journal = {Frontiers in psychology}, volume = {12}, number = {}, pages = {675184}, pmid = {34744856}, issn = {1664-1078}, abstract = {The application of extended mind theory to the Internet and Web yields the possibility of Internet-extended knowledge: a form of extended knowledge that arises as a result of an individual's interactions with the online environment. The present paper seeks to advance our understanding of Internet-extended knowledge by describing the functionality of a real-world application, called the HoloArt app.
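The multiscale entropy computation optimized in the epilepsy entry above (pmid34745491) can be sketched compactly in Python: coarse-grain the signal at each scale, then compute sample entropy. The embedding dimension m and tolerance r below are conventional defaults standing in for the parameters the authors actually optimize.

import numpy as np

def sample_entropy(x, m=2, r=0.15):
    """Sample entropy of a 1-D series: -log of the ratio of (m+1)-point to
    m-point template matches within tolerance r * std(x)."""
    x = np.asarray(x, dtype=float)
    tol = r * x.std()
    def matches(mm):
        t = np.lib.stride_tricks.sliding_window_view(x, mm)
        d = np.abs(t[:, None, :] - t[None, :, :]).max(-1)
        return ((d <= tol).sum() - len(t)) / 2  # matched pairs, self-matches excluded
    b, a = matches(m), matches(m + 1)
    return -np.log(a / b) if a > 0 and b > 0 else np.inf

def multiscale_entropy(x, max_scale=5, m=2, r=0.15):
    """Coarse-grain x at scales 1..max_scale and return sample entropy per scale."""
    out = []
    for s in range(1, max_scale + 1):
        n = len(x) // s
        coarse = np.asarray(x[: n * s]).reshape(n, s).mean(axis=1)
        out.append(sample_entropy(coarse, m, r))
    return out

rng = np.random.default_rng(1)
print(multiscale_entropy(rng.standard_normal(1000)))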
In part, the goal of the paper is illustrative: it is intended to show how recent advances in mixed reality, cloud computing, and machine intelligence might be combined so as to yield a putative case of Internet-extended knowledge. Beyond this, however, the paper is intended to support the philosophical effort to understand the notions of extended knowledge and the extended mind. In particular, the HoloArt app raises questions about the universality of some of the criteria that have been used to evaluate putative cases of cognitive extension. The upshot is a better appreciation of the way in which claims about extended knowledge and the extended mind might be affected by a consideration of technologically advanced resources.}, } @article {pmid34740667, year = {2022}, author = {Munawar, HS and Mojtahedi, M and Hammad, AWA and Kouzani, A and Mahmud, MAP}, title = {Disruptive technologies as a solution for disaster risk management: A review.}, journal = {The Science of the total environment}, volume = {806}, number = {Pt 3}, pages = {151351}, doi = {10.1016/j.scitotenv.2021.151351}, pmid = {34740667}, issn = {1879-1026}, mesh = {Artificial Intelligence ; Big Data ; Data Science ; *Disasters ; *Disruptive Technology ; }, abstract = {Integrating disruptive technologies within smart cities improves the infrastructure needed to potentially deal with disasters. This paper provides a perspective review of disruptive technologies such as the Internet of Things (IoT), image processing, artificial intelligence (AI), big data and smartphone applications, which are in use and have been proposed for future improvements in disaster management of urban regions. The key focus of this paper is exploring ways in which smart cities could be established to harness the potential of disruptive technologies and improve post-disaster management. The key questions explored are (a) what are the gaps or barriers to the utilization of disruptive technologies in the area of disaster management, and (b) how can the existing methods of disaster management be improved through the application of disruptive technologies. To respond to these questions, a novel framework based on integrated approaches drawing on big data analytics and AI is proposed for developing disaster management solutions using disruptive technologies.}, } @article {pmid34737350, year = {2021}, author = {Alammari, A and Moiz, SA and Negi, A}, title = {Enhanced layered fog architecture for IoT sensing and actuation as a service.}, journal = {Scientific reports}, volume = {11}, number = {1}, pages = {21693}, pmid = {34737350}, issn = {2045-2322}, abstract = {The reduced service cost offered by the Sensing and Actuation as a Service paradigm, particularly in the Internet of Things (IoT) era, has encouraged many establishments to start operating without worrying about owning their own infrastructure. Such a paradigm is typically managed by a centralized cloud service provider. The fog paradigm has emerged as a mini-cloud that, if designed with care to assist the cloud, can together with it achieve better performance. This article introduces a layered fog architecture called Sensors and Actuator Layered Fog Services Delivery (SALFSD) for IoT ecosystems. The significance of SALFSD is its fault resistance: it dynamically reassigns the tasks of a failed node to the nearest active node to maintain the network connection. Besides, SALFSD monitors end users' pre-specified cases closer to the physical devices hired by end users to speed up the generation of actuation commands.
Such a node may offload its monitoring responsibility to its parent node if it becomes overloaded. SALFSD is evaluated using Yet Another Fog Simulator in different scenarios (numbers of users, sensors, actuators, and areas). A comparison was made for Sensing and Actuating as a Service (SAaaS) with/without layered fog, and layered fog with/without (failure reassignment, pre-specified cases in fog nodes, and offloading). The comparison was conducted in terms of computing/communication latencies and the number of missed messages for both observations and actuation commands. Results show that failure reassignment prevented the loss of messages and maintained network connectivity. Also, wisely selecting the monitoring fog node according to end users' pre-specified cases, together with the offloading scheme, decreased actuation latency.}, } @article {pmid34736170, year = {2021}, author = {Alassafi, MO}, title = {Success indicators for an efficient utilization of cloud computing in healthcare organizations: Saudi healthcare as case study.}, journal = {Computer methods and programs in biomedicine}, volume = {212}, number = {}, pages = {106466}, doi = {10.1016/j.cmpb.2021.106466}, pmid = {34736170}, issn = {1872-7565}, mesh = {*Cloud Computing ; *Computer Security ; Delivery of Health Care ; Reproducibility of Results ; Saudi Arabia ; }, abstract = {The population in Saudi Arabia is expected to reach 40 million by 2025. Consequently, healthcare information will become critical to manage. Despite the fact that adopting cloud computing in Saudi healthcare organizations can facilitate cost reduction, capacity building, institutional interoperability, and access to data analytics, the adoption rate is very low. Hence, a new model is proposed for adopting cloud computing in Saudi healthcare organizations. The novelty of this work comes from using a quantitative method to test the influence of users' attitudes, data security, data control, data privacy, compliance, and reliability on the intention to adopt cloud computing in the context of Saudi Arabian healthcare organizations. The Partial Least Squares (PLS) method, based on Structural Equation Modeling (SEM), was used for model development. About 160 respondents from the relevant health organizations participated. The results show that attitude towards using technology, data security, compliance, and the reliability of cloud computing services are important determining factors in the adoption of cloud computing in Saudi healthcare organizations. However, the findings regarding data privacy and data control in the Saudi healthcare organizational context clearly show the need for data privacy, data control, and data protection legislation in Saudi Arabia. Therefore, raising awareness of data privacy and data control policies among IT managers is essential.
Future studies should use a more holistic and industry-specific framework such as the technology-organization-environment (TOE) framework to find new influencing factors from the technological, organizational, and environmental contexts.}, } @article {pmid34731183, year = {2021}, author = {Ran, H}, title = {Construction and optimization of inventory management system via cloud-edge collaborative computing in supply chain environment in the Internet of Things era.}, journal = {PloS one}, volume = {16}, number = {11}, pages = {e0259284}, pmid = {34731183}, issn = {1932-6203}, mesh = {Algorithms ; Cloud Computing ; Commerce/*methods ; Equipment and Supplies ; Internet of Things ; }, abstract = {The present work aims to strengthen the core competitiveness of industrial enterprises in the supply chain environment, and enhance the efficiency of inventory management and the utilization rate of inventory resources. First, an analysis is performed on the supply and demand relationship between suppliers and manufacturers in the supply chain environment and the production mode of intelligent plant based on cloud manufacturing. It is found that the efficient management of spare parts inventory can effectively reduce costs and improve service levels. On this basis, different prediction methods are proposed for different data types of spare parts demand, which are all verified. Finally, the inventory management system based on cloud-edge collaborative computing is constructed, and the genetic algorithm is selected as a comparison to validate the performance of the system reported here. The experimental results indicate that the prediction method proposed here, based on weighted summation of eigenvalues and fitting, has the smallest error and the best fitting effect in the demand prediction of machine spare parts, and the minimum error after fitting is only 2.2%. Besides, the spare parts demand prediction method can complete the prediction well in the face of three different types of time series of spare parts demand data, and the relative error of prediction is maintained at about 10%. This prediction system can meet the basic requirements of spare parts demand prediction and achieve higher prediction accuracy than the periodic prediction method. Moreover, the inventory management system based on cloud-edge collaborative computing has shorter processing time, higher efficiency, better stability, and better overall performance than the genetic algorithm. The research results provide reference and ideas for the application of edge computing in inventory management, which have certain reference significance and application value.}, } @article {pmid34729386, year = {2021}, author = {Simonetti, D and Pimple, U and Langner, A and Marelli, A}, title = {Pan-tropical Sentinel-2 cloud-free annual composite datasets.}, journal = {Data in brief}, volume = {39}, number = {}, pages = {107488}, pmid = {34729386}, issn = {2352-3409}, abstract = {Sentinel-2 MSI is one of the core missions of the Copernicus Earth Observation programme of the European Union. This mission shows great potential to map the regional high-resolution spatio-temporal dynamics of land use and land cover. In tropical regions, despite the short revisit time of 5 days when including both Sentinel-2A and 2B satellites, the frequent presence of clouds, cloud shadows, haze, and other atmospheric contaminants can preclude visibility of the Earth's surface for up to several months.
In this paper we present four annual pan-tropical cloud-free composites computed and exported from Google Earth Engine (GEE) by making use of available Sentinel-2 L1C collection for the period spanning from 2015 to 2020. We furthermore propose empirical approaches to reduce the BRDF effect over tropical forest areas by showing pros and cons of image-based versus swath-based methodologies. Additionally, we provide a dedicated web-platform offering a fast and intuitive way to browse and explore the proposed annual composites as well as layers of potential annual changes as a ready-to-use means to visually identify and verify degradation and deforestation activities as well as other land cover changes.}, } @article {pmid34729056, year = {2021}, author = {Chen, SW and Gu, XW and Wang, JJ and Zhu, HS}, title = {AIoT Used for COVID-19 Pandemic Prevention and Control.}, journal = {Contrast media & molecular imaging}, volume = {2021}, number = {}, pages = {3257035}, pmid = {34729056}, issn = {1555-4317}, mesh = {*Artificial Intelligence ; COVID-19/*prevention & control/virology ; Delivery of Health Care/*standards ; Humans ; Internet of Things/*statistics & numerical data ; *Machine Learning ; SARS-CoV-2/*isolation & purification ; }, abstract = {The pandemic of COVID-19 is continuing to wreak havoc in 2021, with at least 170 million victims around the world. Healthcare systems are overwhelmed by the large-scale virus infection. Luckily, Internet of Things (IoT) is one of the most effective paradigms in the intelligent world, in which the technology of artificial intelligence (AI), like cloud computing and big data analysis, is playing a vital role in preventing the spread of the pandemic of COVID-19. AI and 5G technologies are advancing by leaps and bounds, further strengthening the intelligence and connectivity of IoT applications, and conventional IoT has been gradually upgraded to be more powerful AI + IoT (AIoT). For example, in terms of remote screening and diagnosis of COVID-19 patients, AI technology based on machine learning and deep learning has recently upgraded medical equipment significantly and has reshaped the workflow with minimal contact with patients, so medical specialists can make clinical decisions more efficiently, providing the best protection not only to patients but also to specialists themselves. This paper reviews the latest progress made in combating COVID-19 with both IoT and AI and also provides comprehensive details on how to combat the pandemic of COVID-19 as well as the technologies that may be applied in the future.}, } @article {pmid34723173, year = {2021}, author = {Zhang, W and Wang, Y and Ji, X and Wu, Y and Zhao, R}, title = {ROA: A Rapid Learning Scheme for In-Situ Memristor Networks.}, journal = {Frontiers in artificial intelligence}, volume = {4}, number = {}, pages = {692065}, pmid = {34723173}, issn = {2624-8212}, abstract = {Memristors show great promise in neuromorphic computing owing to their high-density integration, fast computing and low-energy consumption. However, the non-ideal update of synaptic weight in memristor devices, including nonlinearity, asymmetry and device variation, still poses challenges to the in-situ learning of memristors, thereby limiting their broad applications. Although the existing offline learning schemes can avoid this problem by transferring the weight optimization process into cloud, it is difficult to adapt to unseen tasks and uncertain environments. 
Here, we propose a bi-level meta-learning scheme, named Rapid One-step Adaption (ROA), that can alleviate the non-ideal update problem and achieve fast adaptation and high accuracy. By introducing a special regularization constraint and a dynamic learning rate strategy for in-situ learning, the ROA method effectively combines offline pre-training and online rapid one-step adaption. Furthermore, we implemented it on memristor-based neural networks to solve few-shot learning tasks, proving its superiority over the pure offline and online schemes under noisy conditions. This method can solve in-situ learning in non-ideal memristor networks, providing potential applications in on-chip neuromorphic learning and edge computing.}, } @article {pmid36618951, year = {2021}, author = {Moosavi, J and Bakhshi, J and Martek, I}, title = {The application of industry 4.0 technologies in pandemic management: Literature review and case study.}, journal = {Healthcare analytics (New York, N.Y.)}, volume = {1}, number = {}, pages = {100008}, pmid = {36618951}, issn = {2772-4425}, abstract = {The Covid-19 pandemic's impact on people's lives has been devastating. Around the world, people have been forced to stay home, resorting to the use of digital technologies in an effort to continue their life and work as best they can. Covid-19 has thus accelerated society's digital transformation towards Industry 4.0 (the fourth industrial revolution). Using scientometric analysis, this study presents a systematic literature review of the themes within Industry 4.0. Thematic analysis reveals the Internet of Things (IoT), Artificial Intelligence (AI), Cloud computing, Machine learning, Security, Big Data, Blockchain, Deep learning, Digitalization, and Cyber-physical systems (CPS) to be the key technologies associated with Industry 4.0. Subsequently, a case study using Industry 4.0 technologies to manage the Covid-19 pandemic is discussed. In conclusion, Covid-19 is clearly shown to be an accelerant in the progression towards Industry 4.0. Moreover, the technologies of this digital transformation can be expected to be invoked in the management of future pandemics.}, } @article {pmid34715889, year = {2021}, author = {Amselem, S and Gueguen, S and Weinbach, J and Clement, A and Landais, P and , }, title = {RaDiCo, the French national research program on rare disease cohorts.}, journal = {Orphanet journal of rare diseases}, volume = {16}, number = {1}, pages = {454}, pmid = {34715889}, issn = {1750-1172}, mesh = {Europe ; France/epidemiology ; Humans ; *Rare Diseases/epidemiology/genetics ; }, abstract = {BACKGROUND: Rare diseases (RDs) affect nearly 3 million people in France and at least 26-30 million people in Europe. These diseases, which represent a major medical concern, are mainly of genetic origin, often chronic, progressive, degenerative, life threatening and disabling, accounting for more than one third of all deaths occurring during infancy. In this context, there is a need for coordinated information on RDs at national/international levels, based on high quality, interoperable and sharable data. The main objective of the RaDiCo (Rare Disease Cohorts) program, coordinated by Inserm, was the development of RD e-cohorts via a national platform. The cohort projects were selected through a national call in 2014. The e-cohorts are supported by an interoperable platform, equivalent to an infrastructure, constructed on the "cloud computing" principle and in compliance with the European General Data Protection Regulation.
It is dedicated to allowing continuous monitoring of data quality and consistency, in line with the French Health Data Hub.

RESULTS: Depending on the cohort, the objectives are to describe the natural history of the studied RD(s), identify the underlying disease genes, establish phenotype-genotype correlations, decipher their pathophysiology, assess their societal and medico-economic impact, and/or identify patients eligible for new therapeutic approaches. Inclusion of prevalent and incident cases started at the end of 2016. As of April 2021, 5558 patients have been included within 13 RD e-cohorts covering 67 diseases integrated into 10 European Reference Networks and contributing to the European Joint Program on RDs. Several original results have been obtained in relation to the secondary objectives of the RaDiCo cohorts. They deal with the discovery of new disease genes, assessment of treatment management, deciphering of the underlying pathophysiological mechanisms, diagnostic approaches, genotype-phenotype relationships, development and validation of questionnaires relative to disease burden, or methodological aspects.

CONCLUSION: RaDiCo currently hosts 13 RD e-cohorts on a sharable and interoperable platform constructed on the "cloud computing" principle. New RD e-cohorts at the European and international levels are targeted.}, } @article {pmid34713107, year = {2021}, author = {Spanakis, EG and Sfakianakis, S and Bonomi, S and Ciccotelli, C and Magalini, S and Sakkalis, V}, title = {Emerging and Established Trends to Support Secure Health Information Exchange.}, journal = {Frontiers in digital health}, volume = {3}, number = {}, pages = {636082}, pmid = {34713107}, issn = {2673-253X}, abstract = {This work aims to provide information, guidelines, established practices and standards, and an extensive evaluation of new and promising technologies for the implementation of a secure information sharing platform for health-related data. We focus strictly on the technical aspects and specifically on the sharing of health information, studying innovative techniques for secure information sharing within the health-care domain, and we describe our solution and methodologically evaluate the use of blockchain for integration within our implementation. To do so, we analyze health information sharing within the concept of the PANACEA project that facilitates the design, implementation, and deployment of a relevant platform. The research presented in this paper provides evidence and argumentation toward advanced and novel implementation strategies for a state-of-the-art information sharing environment; a description of high-level requirements for the transfer of data between different health-care organizations or cross-border; technologies to support the secure interconnectivity and trust between information technology (IT) systems participating in a data-sharing "community"; standards, guidelines, and interoperability specifications for implementing a common understanding and integration in the sharing of clinical information; and the use of cloud computing and prospectively more advanced technologies such as blockchain. The technologies described and the possible implementation approaches are presented in the design of an innovative secure information sharing platform in the health-care domain.}, } @article {pmid34708196, year = {2021}, author = {Li, L and Thompson, C and Henselman-Petrusek, G and Giusti, C and Ziegelmeier, L}, title = {Minimal Cycle Representatives in Persistent Homology Using Linear Programming: An Empirical Study With User's Guide.}, journal = {Frontiers in artificial intelligence}, volume = {4}, number = {}, pages = {681117}, pmid = {34708196}, issn = {2624-8212}, abstract = {Cycle representatives of persistent homology classes can be used to provide descriptions of topological features in data. However, the non-uniqueness of these representatives creates ambiguity and can lead to many different interpretations of the same set of classes. One approach to solving this problem is to optimize the choice of representative against some measure that is meaningful in the context of the data. In this work, we provide a study of the effectiveness and computational cost of several ℓ1-minimization optimization procedures for constructing homological cycle bases for persistent homology with rational coefficients in dimension one, including uniform-weighted and length-weighted edge-loss algorithms as well as uniform-weighted and area-weighted triangle-loss algorithms.
We conduct these optimizations via standard linear programming methods, applying general-purpose solvers to optimize over column bases of simplicial boundary matrices. Our key findings are: 1) optimization is effective in reducing the size of cycle representatives, though the extent of the reduction varies according to the dimension and distribution of the underlying data, 2) the computational cost of optimizing a basis of cycle representatives exceeds the cost of computing such a basis in most data sets we consider, 3) the choice of linear solver matters greatly to the computation time of optimizing cycles, 4) the computation time of solving an integer program is not significantly longer than the computation time of solving a linear program for most of the cycle representatives, using the Gurobi linear solver, 5) strikingly, whether or not integer solutions are required, we almost always obtain a solution with the same cost, and almost all solutions found have entries in {-1, 0, 1} and are therefore also solutions to a restricted ℓ0 optimization problem, and 6) we obtain qualitatively different results for generators in Erdős-Rényi random clique complexes than in real-world and synthetic point cloud data.}, } @article {pmid34702704, year = {2021}, author = {Zheng, GY and Zeng, T and Li, YX}, title = {Application and prospect of cutting-edge information technology in biomedical big data.}, journal = {Yi chuan = Hereditas}, volume = {43}, number = {10}, pages = {924-929}, doi = {10.16288/j.yczz21-192}, pmid = {34702704}, issn = {0253-9772}, mesh = {Artificial Intelligence ; Big Data ; *Biomedical Research ; Cloud Computing ; *Information Technology ; }, abstract = {In recent years, with the development of various high-throughput omics based biological technologies (BT), biomedical research began to enter the era of big data. In the face of high-dimensional, multi-domain and multi-modal biomedical big data, scientific research requires a new paradigm of data intensive scientific research. The vigorous development of cutting-edge information technologies (IT) such as cloud computing, blockchain and artificial intelligence provides technical means for the practice of this new research paradigm. Here, we describe the application of such cutting-edge information technologies in biomedical big data, and propose a forward-looking prospect for the construction of a new paradigm supporting environment for data intensive scientific research. We expect to establish a new research scheme and new scientific research paradigm integrating BT & IT technology, which can finally promote a great leap forward in the development of biomedical research.}, } @article {pmid34696135, year = {2021}, author = {Mutlag, AA and Ghani, MKA and Mohammed, MA and Lakhan, A and Mohd, O and Abdulkareem, KH and Garcia-Zapirain, B}, title = {Multi-Agent Systems in Fog-Cloud Computing for Critical Healthcare Task Management Model (CHTM) Used for ECG Monitoring.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {20}, pages = {}, pmid = {34696135}, issn = {1424-8220}, mesh = {*Cloud Computing ; Computer Simulation ; Delivery of Health Care ; *Electrocardiography ; Models, Theoretical ; }, abstract = {In the last decade, developments in healthcare technologies have been progressing steadily in practice. Healthcare applications such as ECG monitoring, heartbeat analysis, and blood pressure control connect with external servers in a manner called cloud computing.
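As an illustration of the ℓ1 cycle optimization summarized in the Li et al. entry above: once a cycle is split into positive and negative parts, minimizing its ℓ1 norm over the homology class becomes a standard linear program. The Python sketch below, using scipy, shows the construction on a toy boundary matrix; the matrices, and the choice of the HiGHS solver rather than the study's Gurobi setup, are assumptions for illustration only.

```python
# Minimize ||z||_1 over z = z0 + B @ x, where z0 is an initial cycle and the
# columns of B span boundaries (toy data, not the study's benchmarks).
import numpy as np
from scipy.optimize import linprog

def l1_minimal_cycle(z0, B):
    m, k = B.shape
    # Variables: [u (m), v (m), x (k)] with z = u - v and u, v >= 0,
    # so minimizing sum(u) + sum(v) minimizes ||z||_1 at the optimum.
    c = np.concatenate([np.ones(2 * m), np.zeros(k)])
    A_eq = np.hstack([np.eye(m), -np.eye(m), -B])   # u - v - B x = z0
    bounds = [(0, None)] * (2 * m) + [(None, None)] * k
    res = linprog(c, A_eq=A_eq, b_eq=z0, bounds=bounds, method="highs")
    u, v = res.x[:m], res.x[m:2 * m]
    return u - v  # optimized cycle representative

# Tiny example: a 4-edge cycle perturbed by one boundary column.
z0 = np.array([1.0, 1.0, 1.0, 1.0])
B = np.array([[1.0], [1.0], [-1.0], [-1.0]])
print(l1_minimal_cycle(z0, B))
```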
The emerging cloud paradigm offers different models, such as fog computing and edge computing, to enhance the performance of healthcare applications with minimum end-to-end delay in the network. However, many research challenges exist in the fog-cloud enabled network for healthcare applications. Therefore, in this paper, a Critical Healthcare Task Management (CHTM) model is proposed and implemented using an ECG dataset. We design a resource scheduling model among fog nodes at the fog level. A multi-agent system is proposed to provide the complete management of the network from the edge to the cloud. The proposed model significantly overcomes the limitations in providing interoperability, resource sharing, scheduling, and dynamic task allocation for managing critical tasks. The simulation results show that our model, in comparison with the cloud, significantly reduces the network usage by 79%, the response time by 90%, the network delay by 65%, the energy consumption by 81%, and the instance cost by 80%.}, } @article {pmid34696070, year = {2021}, author = {Andreazi, GT and Estrella, JC and Bruschi, SM and Immich, R and Guidoni, D and Alves Pereira Júnior, L and Meneguette, RI}, title = {MoHRiPA-An Architecture for Hybrid Resources Management of Private Cloud Environments.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {20}, pages = {}, pmid = {34696070}, issn = {1424-8220}, support = {2021/02//ITA's Programa de Pós-graduação em Aplicações Operacionais (ITA/PPGAO)/ ; #2020/07162-0//Fundação de Amparo à Pesquisa do Estado de São Paulo/ ; }, mesh = {Algorithms ; *Cloud Computing ; *Ecosystem ; Workload ; }, abstract = {The high demand for data processing in web applications has grown in recent years due to the increased supply of computing infrastructure as a service in the cloud computing ecosystem. This ecosystem offers benefits such as broad network access, elasticity, and resource sharing, among others. However, properly exploiting these benefits requires optimized provisioning of computational resources in the target infrastructure. Several studies in the literature improve the quality of this management, which involves enhancing the scalability of the infrastructure, either through cost management policies or strategies aimed at resource scaling. However, few studies adequately explore performance evaluation mechanisms. In this context, we present MoHRiPA (Management of Hybrid Resources in Private cloud Architecture). MoHRiPA has a modular design encompassing scheduling algorithms, virtualization tools, and monitoring tools. The proposed architecture solution allows assessing the overall system's performance by using complete factorial planning to identify the general behavior of the architecture under a high demand of requests. It also evaluates workload behavior and the number of virtualized resources, and provides an elastic resource manager. A composite metric is also proposed and adopted as a criterion for resource scaling. This work presents a performance evaluation using formal techniques, analyzing the architecture's scheduling algorithms, experimental bottlenecks, average response time, and latency.
In summary, the proposed MoHRiPA resource-mapping algorithm (HashRefresh) showed significantly better results than the analyzed competitor, decreasing the uniform average by about 7% compared to ListScheduling (LS).}, } @article {pmid34696034, year = {2021}, author = {Song, M and Sang, Y}, title = {Secure Outsourcing of Matrix Determinant Computation under the Malicious Cloud.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {20}, pages = {}, pmid = {34696034}, issn = {1424-8220}, support = {201904010209//the Science and Technology Program of Guangzhou, China/ ; 2017A010101039//the Science and Technology Program of Guangdong Province, China/ ; }, abstract = {Computing the determinant of a large matrix is a time-consuming task that appears more and more widely in science and engineering problems in the era of big data. Fortunately, cloud computing can provide large storage and computation resources, and can thus act as an ideal platform to complete computation outsourced from resource-constrained devices. However, cloud computing also causes security issues. For example, a curious cloud may spy on user privacy through the outsourced data. A malicious cloud violating the computing scripts, as well as cloud hardware failures, will lead to incorrect results. In this paper, we therefore propose a secure outsourcing algorithm to compute the determinant of a large matrix under the malicious cloud model. The algorithm protects the privacy of the original matrix by applying row/column permutation and other transformations to the matrix. To resist malicious cheating on the computation tasks, a new verification method is utilized in our algorithm. Unlike previous algorithms that require multiple rounds of verification, our verification requires only one round without trading off the cheating detectability, which greatly reduces the local computation burden. Both theoretical and experimental analysis demonstrate that our algorithm achieves better efficiency for local users than previous ones across various matrix dimensions, without sacrificing the security requirements in terms of privacy protection and cheating detectability.}, } @article {pmid34696006, year = {2021}, author = {Ilgner, P and Cika, P and Stusek, M}, title = {SCADA-Based Message Generator for Multi-Vendor Smart Grids: Distributed Integration and Verification of TASE.2.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {20}, pages = {}, pmid = {34696006}, issn = {1424-8220}, support = {TN01000077//Technology Agency of the Czech Republic/ ; }, mesh = {Communication ; *Computer Systems ; *Information Storage and Retrieval ; Reproducibility of Results ; }, abstract = {Recent developments in massive machine-type communication (mMTC) scenarios have given rise to never-before-seen requirements, which triggered the Industry 4.0 revolution. The new scenarios bring even more pressure to comply with reliability and communication security requirements and to enable flawless functionality of critical infrastructure, e.g., the smart grid. We discuss typical network grid architecture, communication strategies, and methods for building a scalable and high-speed data processing and storage platform. This paper focuses on data transmission using the IEC 60870-6 (ICCP/TASE.2) set of standards. The main goal is to introduce the TASE.2 traffic generator and the data collection back-end with the implemented load balancing functionality to understand the limits of current protocols used in the smart grids.
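The determinant-outsourcing entry above masks the input matrix with row/column permutations (and related transformations) before sending it to the cloud. A toy Python sketch of that masking idea follows; the paper's actual transformations and its one-round verification step are more elaborate, so everything below is an illustrative assumption rather than the published scheme.

```python
# Mask A as D @ P @ A @ Q (row scaling + row/column permutations), let the
# "cloud" compute the determinant, then unmask locally.
import numpy as np

rng = np.random.default_rng(0)

def mask(A):
    n = A.shape[0]
    p, q = rng.permutation(n), rng.permutation(n)
    d = rng.uniform(1.0, 2.0, size=n)         # nonzero diagonal scaling
    A_masked = (d[:, None] * A[p])[:, q]      # scaled+permuted rows, permuted cols
    # Unmasking factor det(P) * det(Q) * det(D); a real scheme would compute
    # the permutation parities in O(n) instead of via a determinant.
    sign = lambda perm: np.linalg.det(np.eye(n)[perm])
    return A_masked, sign(p) * sign(q) * d.prod()

def cloud_det(M):                             # stands in for the remote worker
    return np.linalg.det(M)

A = rng.standard_normal((5, 5))
A_masked, factor = mask(A)
recovered = cloud_det(A_masked) / factor
assert np.isclose(recovered, np.linalg.det(A))
print(recovered, np.linalg.det(A))
```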
To this end, we developed an assessment framework for generating and collecting TASE.2 communication, with long-term data storage providing high availability and load-balancing capabilities. The designed proof-of-concept supports complete cryptographic security and allows users to perform complex testing and verification of the TASE.2 network node configuration. The implemented components were tested in a cloud-based Microsoft Azure environment in four geographically separated locations. The findings from the testing indicate the high performance and scalability of the proposed platform, allowing the proposed generator to also be used for high-speed load testing. The load-balancing performance shows load-balancer CPU usage below 15% while processing 5000 messages per second. This makes it possible to achieve up to a 7-fold performance improvement, resulting in processing of up to 35,000 messages per second.}, } @article {pmid34695973, year = {2021}, author = {Buckley, T and Ghosh, B and Pakrashi, V}, title = {Edge Structural Health Monitoring (E-SHM) Using Low-Power Wireless Sensing.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {20}, pages = {}, pmid = {34695973}, issn = {1424-8220}, support = {12/RC/2302_2//Science Foundation Ireland/Ireland ; EAPA_826/2018//EU Interreg/ ; PBA/BIO/18/02//ERA-NET COFUND/ ; }, mesh = {*Acceleration ; *Electric Power Supplies ; Monitoring, Physiologic ; }, abstract = {Effective Structural Health Monitoring (SHM) often requires continuous monitoring to capture changes in features of interest in structures, which are often located far from power sources. A key challenge lies in continuous low-power data transmission from sensors. Despite significant developments in long-range, low-power telecommunication (e.g., LoRa, NB-IoT), there are inadequate demonstrative benchmarks for low-power SHM. Damage detection is often based on monitoring features computed from acceleration signals, where data are extensive due to the sampling frequency (~100-500 Hz). Low-power, long-range telecommunications are restricted in both the size and frequency of data packets. However, microcontrollers are becoming more efficient, enabling local computing of damage-sensitive features. This paper demonstrates the implementation of an Edge-SHM framework through low-power, long-range, wireless, low-cost and off-the-shelf components. A bespoke setup is developed with a low-power MEMS accelerometer and a microcontroller, where frequency- and time-domain features are computed over set time intervals before being sent to a cloud platform. A cantilever beam excited by an electrodynamic shaker is monitored, where damage is introduced through the controlled loosening of bolts at the fixed boundary, thereby introducing rotation at its fixed end.
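The Edge-SHM entry above computes frequency- and time-domain features on the microcontroller so that only a few bytes, rather than raw acceleration streams, need to be transmitted. A minimal Python sketch of such damage-sensitive features follows; the function name, the feature choices, and the sampling numbers are assumptions for illustration, not the paper's firmware.

```python
# Summarize one acceleration window into a few transmittable features.
import numpy as np

def edge_features(accel, fs):
    accel = accel - accel.mean()                  # remove DC offset
    rms = float(np.sqrt(np.mean(accel ** 2)))     # time-domain energy
    peak = float(np.max(np.abs(accel)))           # time-domain peak
    spectrum = np.abs(np.fft.rfft(accel))
    freqs = np.fft.rfftfreq(accel.size, d=1.0 / fs)
    dominant = float(freqs[spectrum.argmax()])    # frequency-domain feature
    return {"rms": rms, "peak": peak, "dominant_hz": dominant}

# Example: a 5 Hz decaying oscillation sampled at 200 Hz for 2 s.
fs = 200.0
t = np.arange(0, 2.0, 1.0 / fs)
signal = np.exp(-0.5 * t) * np.sin(2 * np.pi * 5.0 * t)
print(edge_features(signal, fs))                  # dominant_hz ~ 5.0
```

A shift in the dominant frequency or RMS between windows is the kind of compact indicator such a node could report over a constrained link.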
The results demonstrate how an IoT-driven edge platform can benefit continuous monitoring.}, } @article {pmid34693068, year = {2020}, author = {Gorgulla, C and Fackeldey, K and Wagner, G and Arthanari, H}, title = {Accounting of Receptor Flexibility in Ultra-Large Virtual Screens with VirtualFlow Using a Grey Wolf Optimization Method.}, journal = {Supercomputing frontiers and innovations}, volume = {7}, number = {3}, pages = {4-12}, pmid = {34693068}, issn = {2313-8734}, support = {R01 AI037581/AI/NIAID NIH HHS/United States ; R01 AI150709/AI/NIAID NIH HHS/United States ; R01 CA200913/CA/NCI NIH HHS/United States ; R01 GM129026/GM/NIGMS NIH HHS/United States ; }, abstract = {Structure-based virtual screening approaches have the ability to dramatically reduce the time and costs associated to the discovery of new drug candidates. Studies have shown that the true hit rate of virtual screenings improves with the scale of the screened ligand libraries. Therefore, we have recently developed an open source drug discovery platform (VirtualFlow), which is able to routinely carry out ultra-large virtual screenings. One of the primary challenges of molecular docking is the circumstance when the protein is highly dynamic or when the structure of the protein cannot be captured by a static pose. To accommodate protein dynamics, we report the extension of VirtualFlow to allow the docking of ligands using a grey wolf optimization algorithm using the docking program GWOVina, which substantially improves the quality and efficiency of flexible receptor docking compared to AutoDock Vina. We demonstrate the linear scaling behavior of VirtualFlow utilizing GWOVina up to 128 000 CPUs. The newly supported docking method will be valuable for drug discovery projects in which protein dynamics and flexibility play a significant role.}, } @article {pmid34690611, year = {2021}, author = {Nour, B and Mastorakis, S and Mtibaa, A}, title = {Whispering: Joint Service Offloading and Computation Reuse in Cloud-Edge Networks.}, journal = {IEEE International Conference on Communications : [proceedings]. IEEE International Conference on Communications}, volume = {2021}, number = {}, pages = {}, pmid = {34690611}, issn = {1938-1883}, support = {P20 GM109090/GM/NIGMS NIH HHS/United States ; }, abstract = {Due to the proliferation of Internet of Things (IoT) and application/user demands that challenge communication and computation, edge computing has emerged as the paradigm to bring computing resources closer to users. In this paper, we present Whispering, an analytical model for the migration of services (service offloading) from the cloud to the edge, in order to minimize the completion time of computational tasks offloaded by user devices and improve the utilization of resources. We also empirically investigate the impact of reusing the results of previously executed tasks for the execution of newly received tasks (computation reuse) and propose an adaptive task offloading scheme between edge and cloud. 
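The Whispering entry above combines an edge-versus-cloud offloading decision with computation reuse. A minimal Python sketch of those two ideas follows; the latency numbers, service rates, and cache policy are assumptions for illustration, not the paper's analytical model.

```python
# Pick the faster execution site per task, and reuse prior results when the
# same task fingerprint is seen again (computation reuse).
import hashlib

REUSE_CACHE = {}  # task fingerprint -> stored result

def completion_time(cycles, where, edge_queue_s):
    EDGE_HZ, CLOUD_HZ = 2e9, 16e9          # assumed service rates
    EDGE_RTT_S, CLOUD_RTT_S = 0.005, 0.060 # assumed round-trip times
    if where == "edge":
        return EDGE_RTT_S + edge_queue_s + cycles / EDGE_HZ
    return CLOUD_RTT_S + cycles / CLOUD_HZ

def execute(task_bytes, cycles, edge_queue_s):
    fingerprint = hashlib.sha256(task_bytes).hexdigest()
    if fingerprint in REUSE_CACHE:          # reuse: skip execution entirely
        return REUSE_CACHE[fingerprint], 0.0
    where = min(("edge", "cloud"),
                key=lambda w: completion_time(cycles, w, edge_queue_s))
    result = f"processed-at-{where}"        # placeholder for the real work
    REUSE_CACHE[fingerprint] = result
    return result, completion_time(cycles, where, edge_queue_s)

print(execute(b"frame-001", 4e8, edge_queue_s=0.010))  # computed once
print(execute(b"frame-001", 4e8, edge_queue_s=0.200))  # served from cache
```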
Our evaluation results show that Whispering achieves up to 35% and 97% (when coupled with computation reuse) lower task completion times than cases where tasks are executed exclusively at the edge or the cloud.}, } @article {pmid34690529, year = {2022}, author = {Sheikh Sofla, M and Haghi Kashani, M and Mahdipour, E and Faghih Mirzaee, R}, title = {Towards effective offloading mechanisms in fog computing.}, journal = {Multimedia tools and applications}, volume = {81}, number = {2}, pages = {1997-2042}, pmid = {34690529}, issn = {1380-7501}, abstract = {Fog computing is considered a formidable next-generation complement to cloud computing. Nowadays, in light of the dramatic rise in the number of IoT devices, several problems have been raised in cloud architectures. By introducing fog computing as a mediate layer between the user devices and the cloud, one can extend cloud computing's processing and storage capability. Offloading can be utilized as a mechanism that transfers computations, data, and energy consumption from the resource-limited user devices to resource-rich fog/cloud layers to achieve an optimal experience in the quality of applications and improve the system performance. This paper provides a systematic and comprehensive study to evaluate fog offloading mechanisms' current and recent works. Each selected paper's pros and cons are explored and analyzed to state and address the present potentialities and issues of offloading mechanisms in a fog environment efficiently. We classify offloading mechanisms in a fog system into four groups, including computation-based, energy-based, storage-based, and hybrid approaches. Furthermore, this paper explores offloading metrics, applied algorithms, and evaluation methods related to the chosen offloading mechanisms in fog systems. Additionally, the open challenges and future trends derived from the reviewed studies are discussed.}, } @article {pmid34686040, year = {2021}, author = {Li, XG and Blaiszik, B and Schwarting, ME and Jacobs, R and Scourtas, A and Schmidt, KJ and Voyles, PM and Morgan, D}, title = {Graph network based deep learning of bandgaps.}, journal = {The Journal of chemical physics}, volume = {155}, number = {15}, pages = {154702}, doi = {10.1063/5.0066009}, pmid = {34686040}, issn = {1089-7690}, abstract = {Recent machine learning models for bandgap prediction that explicitly encode the structure information to the model feature set significantly improve the model accuracy compared to both traditional machine learning and non-graph-based deep learning methods. The ongoing rapid growth of open-access bandgap databases can benefit such model construction not only by expanding their domain of applicability but also by requiring constant updating of the model. Here, we build a new state-of-the-art multi-fidelity graph network model for bandgap prediction of crystalline compounds from a large bandgap database of experimental and density functional theory (DFT) computed bandgaps with over 806 600 entries (1500 experimental, 775 700 low-fidelity DFT, and 29 400 high-fidelity DFT). The model predicts bandgaps with a 0.23 eV mean absolute error in cross validation for high-fidelity data, and including the mixed data from all different fidelities improves the prediction of the high-fidelity data. The prediction error is smaller for high-symmetry crystals than for low symmetry crystals. 
Our data are published through a new cloud-based computing environment, called the "Foundry," which supports easy creation and revision of standardized data structures and will enable cloud accessible containerized models, allowing for continuous model development and data accumulation in the future.}, } @article {pmid34681121, year = {2021}, author = {Lim, HG and Hsiao, SH and Lee, YG}, title = {Orchestrating an Optimized Next-Generation Sequencing-Based Cloud Workflow for Robust Viral Identification during Pandemics.}, journal = {Biology}, volume = {10}, number = {10}, pages = {}, pmid = {34681121}, issn = {2079-7737}, support = {MOST 109-2221-E-038-016//Ministry of Science and Technology, Taiwan/ ; HHSN261201400008C//National Institutes of Health/ ; }, abstract = {Coronavirus disease 2019 (COVID-19), caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), has recently become a novel pandemic event following the swine flu that occurred in 2009, which was caused by the influenza A virus (H1N1 subtype). The accurate identification of the huge number of samples during a pandemic still remains a challenge. In this study, we integrate two technologies, next-generation sequencing and cloud computing, into an optimized workflow version that uses a specific identification algorithm on the designated cloud platform. We use 182 samples (92 for COVID-19 and 90 for swine flu) with short-read sequencing data from two open-access datasets to represent each pandemic and evaluate our workflow performance based on an index specifically created for SARS-CoV-2 or H1N1. Results show that our workflow could differentiate cases between the two pandemics with a higher accuracy depending on the index used, especially when the index that exclusively represented each dataset was used. Our workflow substantially outperforms the original complete identification workflow available on the same platform in terms of time and cost by preserving essential tools internally. Our workflow can serve as a powerful tool for the robust identification of cases and, thus, aid in controlling the current and future pandemics.}, } @article {pmid34680183, year = {2021}, author = {Prakash, A and Taylor, L and Varkey, M and Hoxie, N and Mohammed, Y and Goo, YA and Peterman, S and Moghekar, A and Yuan, Y and Glaros, T and Steele, JR and Faridi, P and Parihari, S and Srivastava, S and Otto, JJ and Nyalwidhe, JO and Semmes, OJ and Moran, MF and Madugundu, A and Mun, DG and Pandey, A and Mahoney, KE and Shabanowitz, J and Saxena, S and Orsburn, BC}, title = {Reinspection of a Clinical Proteomics Tumor Analysis Consortium (CPTAC) Dataset with Cloud Computing Reveals Abundant Post-Translational Modifications and Protein Sequence Variants.}, journal = {Cancers}, volume = {13}, number = {20}, pages = {}, pmid = {34680183}, issn = {2072-6694}, support = {P30 CA015083/CA/NCI NIH HHS/United States ; }, abstract = {The Clinical Proteomic Tumor Analysis Consortium (CPTAC) has provided some of the most in-depth analyses of the phenotypes of human tumors ever constructed. Today, the majority of proteomic data analysis is still performed using software housed on desktop computers which limits the number of sequence variants and post-translational modifications that can be considered. The original CPTAC studies limited the search for PTMs to only samples that were chemically enriched for those modified peptides. Similarly, the only sequence variants considered were those with strong evidence at the exon or transcript level. 
In this multi-institutional collaborative reanalysis, we utilized unbiased protein databases containing millions of human sequence variants in conjunction with hundreds of common post-translational modifications. Using these tools, we identified tens of thousands of high-confidence PTMs and sequence variants. We identified 4132 phosphorylated peptides in nonenriched samples, 93% of which were confirmed in the samples that were chemically enriched for phosphopeptides. In addition, our results cover 90% of the high-confidence variants reported by the original proteogenomics study, without the need for sample-specific next-generation sequencing. Finally, we report fivefold more somatic and germline variants that have independent evidence at the peptide level, including mutations in ERBB2 and BCAS1. In this reanalysis of CPTAC proteomic data with cloud computing, we present an openly available and searchable web resource of the highest-coverage proteomic profiling of human tumors described to date.}, } @article {pmid34677328, year = {2021}, author = {Mamdiwar, SD and R, A and Shakruwala, Z and Chadha, U and Srinivasan, K and Chang, CY}, title = {Recent Advances on IoT-Assisted Wearable Sensor Systems for Healthcare Monitoring.}, journal = {Biosensors}, volume = {11}, number = {10}, pages = {}, pmid = {34677328}, issn = {2079-6374}, support = {MOST109-2221-E-224-048- MY2//Ministry of Science and Technology, Taiwan/ ; Higher Education Sprout Project//Ministry of Education/ ; }, mesh = {Delivery of Health Care ; Humans ; *Wearable Electronic Devices ; }, abstract = {IoT has played an essential role in many industries over the last few decades. Recent advancements in the healthcare industry have made it possible to make healthcare accessible to more people and improve their overall health. The next step in healthcare is to integrate it with IoT-assisted wearable sensor systems seamlessly. This review rigorously discusses the various IoT architectures, different methods of data processing, transfer, and computing paradigms. It compiles various communication technologies and the devices commonly used in IoT-assisted wearable sensor systems and deals with their various applications in healthcare and their advantages to the world. A comparative analysis of wearable technologies in healthcare is also presented, with a tabulation of various research efforts and technologies. This review also analyses the problems commonly faced in IoT-assisted wearable sensor systems and the specific issues that need to be tackled to optimize these systems in healthcare, and describes the various future improvements that can be made to the architecture and the technology to improve the healthcare industry.}, } @article {pmid34660507, year = {2021}, author = {Senthilkumar, S and Brindha, K and Kryvinska, N and Bhattacharya, S and Reddy Bojja, G}, title = {SCB-HC-ECC-Based Privacy Safeguard Protocol for Secure Cloud Storage of Smart Card-Based Health Care System.}, journal = {Frontiers in public health}, volume = {9}, number = {}, pages = {688399}, pmid = {34660507}, issn = {2296-2565}, mesh = {Cloud Computing ; Computer Security ; Confidentiality ; Delivery of Health Care ; *Health Smart Cards ; Humans ; Privacy ; }, abstract = {The advent of the internet has brought an era of unprecedented connectivity between networked devices, making one form of distributed computing, called cloud computing, popular.
This has also resulted in a dire need for remote authentication schemes for transferring files of a sensitive nature, especially health-related information between patients, smart health cards, and cloud servers via smart health card solution providers. In this article, we elaborate on our proposed approach for such a system and conduct an informal analysis to demonstrate the claim that this scheme provides sufficient security while maintaining usability.}, } @article {pmid34656885, year = {2021}, author = {Landman, T and Nissim, N}, title = {Deep-Hook: A trusted deep learning-based framework for unknown malware detection and classification in Linux cloud environments.}, journal = {Neural networks : the official journal of the International Neural Network Society}, volume = {144}, number = {}, pages = {648-685}, doi = {10.1016/j.neunet.2021.09.019}, pmid = {34656885}, issn = {1879-2782}, mesh = {Cloud Computing ; *Deep Learning ; Neural Networks, Computer ; Software ; }, abstract = {Since the beginning of the 21st century, the use of cloud computing has increased rapidly, and it currently plays a significant role among most organizations' information technology (IT) infrastructure. Virtualization technologies, particularly virtual machines (VMs), are widely used and lie at the core of cloud computing. While different operating systems can run on top of VM instances, in public cloud environments the Linux operating system is used 90% of the time. Because of their prevalence, organizational Linux-based virtual servers have become an attractive target for cyber-attacks, mainly launched by sophisticated malware designed to cause harm, sabotage operations, obtain data, or gain financial profit. This has resulted in the need for an advanced and reliable unknown malware detection mechanism for Linux cloud-based environments. Antivirus software and today's even more advanced malware detection solutions have limitations in detecting new, unseen, and evasive malware. Moreover, many existing solutions are considered untrusted, as they operate on the inspected machine, can be interfered with, and can even be detected by the malware itself, allowing malware to evade detection and cause damage. In this paper, we propose Deep-Hook, a trusted framework for unknown malware detection in Linux-based cloud environments. Deep-Hook hooks the VM's volatile memory in a trusted manner and acquires the memory dump to discover malware footprints while the VM operates. The memory dumps are transformed into visual images, which are analyzed using a convolutional neural network (CNN)-based classifier. The proposed framework has some key advantages, such as its agility, its ability to eliminate the need for features defined by a cyber domain expert, and most importantly, its ability to analyze the entire memory dump and thus better utilize the indications it conceals, allowing the induction of a more accurate detection model. Deep-Hook was evaluated on widely used Linux virtual servers; four state-of-the-art CNN architectures; eight image resolutions; and a total of 22,400 volatile memory dumps representing the execution of a broad set of benign and malicious Linux applications.
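The Deep-Hook entry above transforms raw memory dumps into grayscale images for CNN classification. A minimal Python sketch of that dump-to-image step follows; the zero-padding and square sizing are assumptions for illustration, and the paper's eight evaluated resolutions are not reproduced here.

```python
# Reshape raw dump bytes into a square grayscale array suitable for a CNN.
import math
import numpy as np

def dump_to_image(dump_bytes):
    buf = np.frombuffer(dump_bytes, dtype=np.uint8)
    side = math.ceil(math.sqrt(buf.size))        # smallest square that fits
    padded = np.zeros(side * side, dtype=np.uint8)
    padded[:buf.size] = buf                      # zero-pad the tail
    return padded.reshape(side, side)            # H x W grayscale image

image = dump_to_image(b"\x7fELF" + bytes(range(256)) * 40)
print(image.shape, image.dtype)                  # e.g. (102, 102) uint8
```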
Our experimental evaluation results demonstrate Deep-Hook's ability to effectively, efficiently, and accurately detect and classify unknown malware (even evasive malware like rootkits), with an AUC and accuracy of up to 99.9%.}, } @article {pmid34641012, year = {2021}, author = {Hannan, A and Shafiq, MZ and Hussain, F and Pires, IM}, title = {A Portable Smart Fitness Suite for Real-Time Exercise Monitoring and Posture Correction.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {19}, pages = {}, pmid = {34641012}, issn = {1424-8220}, support = {UIDB/50008/2020//Fundação para a Ciência e a Tecnologia/ ; }, mesh = {*COVID-19 ; Exercise ; Humans ; *Pandemics ; Posture ; SARS-CoV-2 ; }, abstract = {Fitness and sport have drawn significant attention in wearable and persuasive computing. Physical activity benefits health and well-being, improving fitness levels and lowering mental pressure and tension. Nonetheless, during high-power and demanding workouts, there is a high likelihood that physical fitness is seriously affected. Jarring motions and improper posture during workouts can lead to temporary or permanent disability. With the advent of technological advances, activity recognition based on wearable sensors has attracted numerous studies. Still, no fully portable smart fitness suite has been industrialized, which is a central need of the present time, especially during the Covid-19 pandemic. Considering the importance of this issue, we propose a fully portable smart fitness suite that allows households to carry out their routine exercises without a physical gym trainer or gym environment. The proposed system considers two exercises, i.e., the T-bar and the bicep curl, with the assistance of a virtual real-time Android application acting as an overall gym trainer. The proposed fitness suite is embedded with gyroscope and EMG sensory modules for performing the above two exercises. It provides alerts on unhealthy, incorrect posture movements through the Android app and guides the user to the best possible posture based on sensor values. A KNN classification model is used for prediction and user guidance while performing a particular exercise, with the Android-based virtual gym trainer giving feedback through a text-to-speech module. The proposed system attained 89% accuracy, which is quite effective given its portability and virtually assisted gym trainer feature.}, } @article {pmid34640861, year = {2021}, author = {Detti, A and Nakazato, H and Martínez Navarro, JA and Tropea, G and Funari, L and Petrucci, L and Sánchez Segado, JA and Kanai, K}, title = {VirIoT: A Cloud of Things That Offers IoT Infrastructures as a Service.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {19}, pages = {}, pmid = {34640861}, issn = {1424-8220}, support = {814918//H2020-EUJ-2018/ ; }, abstract = {Many cloud providers offer IoT services that simplify the collection and processing of IoT information. However, the IoT infrastructure composed of sensors and actuators that produces this information remains outside the cloud; therefore, application developers must install, connect, and manage this infrastructure themselves. This requirement can be a market barrier, especially for small/medium software companies that cannot afford the associated infrastructural costs and would prefer to focus only on IoT application development.
Motivated by the wish to eliminate this barrier, this paper proposes a Cloud of Things platform, called VirIoT, which fully brings the Infrastructure as a service model typical of cloud computing to the world of Internet of Things. VirIoT provides users with virtual IoT infrastructures (Virtual Silos) composed of virtual things, with which users can interact through dedicated and standardized broker servers in which the technology can be chosen among those offered by the platform, such as oneM2M, NGSI and NGSI-LD. VirIoT allows developers to focus their efforts exclusively on IoT applications without worrying about infrastructure management and allows cloud providers to expand their IoT services portfolio. VirIoT uses external things and cloud/edge computing resources to deliver the IoT virtualization services. Its open-source architecture is microservice-based and runs on top of a distributed Kubernetes platform with nodes in central and edge data centers. The architecture is scalable, efficient and able to support the continuous integration of heterogeneous things and IoT standards, taking care of interoperability issues. Using a VirIoT deployment spanning data centers in Europe and Japan, we conducted a performance evaluation with a two-fold objective: showing the efficiency and scalability of the architecture; and leveraging VirIoT's ability to integrate different IoT standards in order to make a fair comparison of some open-source IoT Broker implementations, namely Mobius for oneM2M, Orion for NGSIv2, Orion-LD and Scorpio for NGSI-LD.}, } @article {pmid34640825, year = {2021}, author = {Kashmar, N and Adda, M and Ibrahim, H}, title = {HEAD Metamodel: Hierarchical, Extensible, Advanced, and Dynamic Access Control Metamodel for Dynamic and Heterogeneous Structures.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {19}, pages = {}, pmid = {34640825}, issn = {1424-8220}, abstract = {The substantial advancements in information technologies have brought unprecedented concepts and challenges to provide solutions and integrate advanced and self-ruling systems in critical and heterogeneous structures. The new generation of networking environments (e.g., the Internet of Things (IoT), cloud computing, etc.) are dynamic and ever-evolving environments. They are composed of various private and public networks, where all resources are distributed and accessed from everywhere. Protecting resources by controlling access to them is a complicated task, especially with the presence of cybercriminals and cyberattacks. What makes this reality also challenging is the diversity and the heterogeneity of access control (AC) models, which are implemented and integrated with a countless number of information systems. The evolution of ubiquitous computing, especially the concept of Industry 4.0 and IoT applications, imposes the need to enhance AC methods since the traditional methods are not able to answer the increasing demand for privacy and security standards. To address this issue, we propose a Hierarchical, Extensible, Advanced, and Dynamic (HEAD) AC metamodel for dynamic and heterogeneous structures that is able to encompass the heterogeneity of the existing AC models. Various AC models can be derived, and different static and dynamic AC policies can be generated using its components. We use Eclipse (xtext) to define the grammar of our AC metamodel. We illustrate our approach with several successful instantiations for various models and hybrid models. 
Additionally, we provide some examples to show how some of the derived models can be implemented to generate AC policies.}, } @article {pmid34640820, year = {2021}, author = {Li, S and Hu, X and Du, Y}, title = {Deep Reinforcement Learning for Computation Offloading and Resource Allocation in Unmanned-Aerial-Vehicle Assisted Edge Computing.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {19}, pages = {}, pmid = {34640820}, issn = {1424-8220}, support = {11461038//National Natural Science Foundation of China/ ; 2020A-033//Innovation Foundation of Colleges and Universities in Gansu Province/ ; }, abstract = {Computation offloading technology extends cloud computing to the edge of the access network close to users, bringing many benefits to terminal devices with limited battery and computational resources. Nevertheless, the existing computation offloading approaches are challenging to apply to specific scenarios, such as the dense distribution of end-users and the sparse distribution of network infrastructure. The technological revolution in the unmanned aerial vehicle (UAV) and chip industry has granted UAVs more computing resources and promoted the emergence of UAV-assisted mobile edge computing (MEC) technology, which could be applied to those scenarios. However, in the MEC system with multiple users and multiple servers, making reasonable offloading decisions and allocating system resources is still a severe challenge. This paper studies the offloading decision and resource allocation problem in the UAV-assisted MEC environment with multiple users and servers. To ensure the quality of service for end-users, we set the weighted total cost of delay, energy consumption, and the size of discarded tasks as our optimization objective. We further formulate the joint optimization problem as a Markov decision process and apply the soft actor-critic (SAC) deep reinforcement learning algorithm to optimize the offloading policy. Numerical simulation results show that the offloading policy optimized by our proposed SAC-based dynamic computing offloading (SACDCO) algorithm effectively reduces the delay, energy consumption, and size of discarded tasks for the UAV-assisted MEC system. Compared with the fixed local-UAV scheme in the specific simulation setting, our proposed approach reduces system delay and energy consumption by approximately 50% and 200%, respectively.}, } @article {pmid34631002, year = {2021}, author = {Jiang, N and Wang, L and Xu, X}, title = {Research on Smart Healthcare Services: Based on the Design of APP Health Service Platform.}, journal = {Journal of healthcare engineering}, volume = {2021}, number = {}, pages = {9922389}, pmid = {34631002}, issn = {2040-2309}, mesh = {Big Data ; *Cloud Computing ; Delivery of Health Care ; *Health Services ; Humans ; }, abstract = {With the development of information technology, big data, and cloud computing, the concept of smart healthcare has gradually become more and more important. Compared with the traditional healthcare service, the new model, health service platform, is becoming increasingly popular and convenient. The use of wearable monitoring devices and some APPs is improving the health monitoring efficacy and effectiveness. To improve and facilitate the smart healthcare services, an effective and convenient app health service platform is needed urgently to serve the older and younger. 
Based on the above, this paper elaborates on the principles of the health service system and the design of the health information perception terminal for the APP health service platform.}, } @article {pmid34621367, year = {2021}, author = {Mehrtak, M and SeyedAlinaghi, S and MohsseniPour, M and Noori, T and Karimi, A and Shamsabadi, A and Heydari, M and Barzegary, A and Mirzapour, P and Soleymanzadeh, M and Vahedi, F and Mehraeen, E and Dadras, O}, title = {Security challenges and solutions using healthcare cloud computing.}, journal = {Journal of medicine and life}, volume = {14}, number = {4}, pages = {448-461}, pmid = {34621367}, issn = {1844-3117}, mesh = {*Cloud Computing ; *Computer Security ; Confidentiality ; Delivery of Health Care ; Humans ; Software ; }, abstract = {Cloud computing is among the most beneficial solutions to digital problems. Security is one of the focal issues in cloud computing technology, and this study aims at investigating security issues of cloud computing and their probable solutions. A systematic review was performed using Scopus, Pubmed, Science Direct, and Web of Science databases. Once the title and abstract were evaluated, the quality of studies was assessed in order to choose the most relevant studies according to inclusion and exclusion criteria. Then, the full texts of the selected studies were read thoroughly to extract the necessary results. According to the review, data security, availability, and integrity, as well as information confidentiality and network security, were the major challenges in cloud security. Further, data encryption, authentication, and classification, along with application programming interfaces (APIs), were security solutions for cloud infrastructure. Data encryption could be applied to store and retrieve data from the cloud in order to provide secure communication. Besides, several central challenges, which make the cloud security engineering process problematic, have been considered in this study.}, } @article {pmid34616887, year = {2021}, author = {B M Mansour, M and Abdelkader, T and Hashem, M and El-Horbaty, EM}, title = {An integrated three-tier trust management framework in mobile edge computing using fuzzy logic.}, journal = {PeerJ. Computer science}, volume = {7}, number = {}, pages = {e700}, pmid = {34616887}, issn = {2376-5992}, abstract = {Mobile edge computing (MEC) is introduced as part of the edge computing paradigm, exploiting cloud computing resources at premises nearer to service users. Cloud service users often search for cloud service providers to meet their computational demands. Due to the lack of previous experience between cloud service providers and users, users hold several doubts related to their data security and privacy, job completion, and the processing performance efficiency of service providers. This paper presents an integrated three-tier trust management framework that evaluates cloud service providers in three main domains: Tier I, which evaluates service provider compliance with the agreed-upon service level agreement; Tier II, which computes the processing performance of a service provider based on its number of successful processes; and Tier III, which measures the violations committed by a service provider, per computational interval, during its processing in the MEC network. The three-tier evaluation is performed during Phase I computation. In Phase II, a service provider's total trust value and status are obtained through the integration of the three tiers using the developed overall trust fuzzy inference system (FIS).
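The trust-management entry above integrates three per-tier scores through a fuzzy inference system. Since the study's rule base and membership functions are not reproduced here, the Python sketch below uses an assumed zero-order Sugeno-style system with triangular memberships, purely to illustrate how fuzzified tier scores can be combined into one trust value.

```python
# Combine three tier scores in [0, 1] into an overall trust value.
def tri(x, a, b, c):
    """Triangular membership function with feet a, c and peak b."""
    if x <= a or x >= c:
        return 0.0
    return (x - a) / (b - a) if x <= b else (c - x) / (c - b)

def fuzzify(score):
    # Degrees of membership in {low, medium, high} over [0, 1].
    return {"low": tri(score, -0.01, 0.0, 0.5),
            "medium": tri(score, 0.0, 0.5, 1.0),
            "high": tri(score, 0.5, 1.0, 1.01)}

OUTPUT_LEVEL = {"low": 0.1, "medium": 0.5, "high": 0.9}  # assumed consequents

def overall_trust(sla, performance, violations_ok):
    """Weighted-average (Sugeno-style) defuzzification over all tier rules."""
    num = den = 0.0
    for score in (sla, performance, violations_ok):
        for label, degree in fuzzify(score).items():
            num += degree * OUTPUT_LEVEL[label]
            den += degree
    return num / den if den else 0.0

# Tier scores: SLA compliance, processing performance, 1 - normalized violations.
print(round(overall_trust(0.9, 0.7, 0.95), 3))  # ~0.78, leaning "trusted"
```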
The simulation results of Phase I show the service provider's trust value in terms of service level agreement compliance, processing performance, and measured violations independently. This exposes a service provider's points of failure, which enables it to enhance its future performance in the evaluated domains. The Phase II results show the overall trust value and status per service provider after integrating the three tiers using the overall trust FIS. The proposed model is distinguished from other models by evaluating several distinct parameters for each service provider.}, } @article {pmid34616535, year = {2021}, author = {Yu, Y}, title = {Cloud Computing into Respiratory Rehabilitation Training-Assisted Treatment of Patients with Pneumonia.}, journal = {Journal of healthcare engineering}, volume = {2021}, number = {}, pages = {5884174}, pmid = {34616535}, issn = {2040-2309}, mesh = {*Cloud Computing ; Humans ; *Pneumonia ; }, abstract = {In order to study the therapeutic effect of respiratory rehabilitation training on patients with pneumonia, this paper proposes an integrated adjuvant therapy program based on the cloud computing model. A total of 60 pneumonia patients admitted to Zhujiang Hospital of Southern Medical University from January to July 2020 were selected as the research subjects and evenly divided into two groups of 30. The control group was treated with conventional anti-infection treatment, and the observation group was treated with supplementary respiratory rehabilitation training on the basis of conventional treatment. The therapeutic effects of the two groups were compared. The results showed that the absorption time of lung lesions was (9.17 ± 3.46) days in the observation group and (13.97 ± 3.07) days in the control group, and the difference between the two groups was statistically significant (t = 5.683, P < 0.001). Respiratory therapy based on the cloud computing model has the characteristics of integration and extensibility, which can be effectively applied to the analysis of treatment effects in patients with pneumonia and is of great significance for the effective analysis of patients' blood gas indexes and lung function indexes.}, } @article {pmid34616533, year = {2021}, author = {Du, Z and Hu, X and Wu, J}, title = {Application of Cloud Computing in the Prediction of Exercise Improvement of Cardiovascular and Digestive Systems in Obese Patients.}, journal = {Journal of healthcare engineering}, volume = {2021}, number = {}, pages = {4695722}, pmid = {34616533}, issn = {2040-2309}, mesh = {Body Mass Index ; *Cloud Computing ; Exercise ; Humans ; *Obesity ; Overweight ; }, abstract = {Based on the cardiovascular and digestive problems of obese patients, this paper adopted the cloud computing method and selected 100 subjects from big data (23 normal-weight subjects, 37 overweight patients, and 40 obese patients) as the research objects, studying the heart configuration and digestive system of obese people. Results show that BMI = L (24 < BMI ≤ 27.9) and BMI = XL (BMI > 27.9) were identified as target correlation groups in this experiment, each associated with the cardiac structural parameters.
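The group comparison reported in the Yu entry above (pmid34616535) can be checked directly from the published summary statistics; SciPy's from-stats t-test reproduces the reported t value.

    # Reproduce the two-sample t-test from the summary statistics alone:
    # lesion absorption time (days), control 13.97 +/- 3.07 (n=30) vs
    # observation 9.17 +/- 3.46 (n=30), as reported in pmid34616535.
    from scipy.stats import ttest_ind_from_stats

    t, p = ttest_ind_from_stats(mean1=13.97, std1=3.07, nobs1=30,
                                mean2=9.17, std2=3.46, nobs2=30)
    print(f"t = {t:.3f}, p = {p:.2e}")  # t ~ 5.68, p < 0.001, as in the abstract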
Cloud computing facilitates early detection, early prevention, and early intervention in heart configuration changes in overweight and obese patients.}, } @article {pmid34614001, year = {2021}, author = {Zhang, R and Song, Y}, title = {Relationship between employees' career maturity and career planning of edge computing and cloud collaboration from the perspective of organizational behavior.}, journal = {PloS one}, volume = {16}, number = {10}, pages = {e0257582}, pmid = {34614001}, issn = {1932-6203}, mesh = {Algorithms ; *Career Mobility ; *Cloud Computing ; Computer Simulation ; Humans ; *Internet of Things ; }, abstract = {A new IoT (Internet of Things) analysis platform is designed based on edge computing and cloud collaboration from the perspective of organizational behavior, to fundamentally understand the relationship between enterprise career maturity and career planning, and meet the actual needs of enterprises. The performance of the proposed model is further determined according to the characteristic of the edge being near data sources, with the help of factor analysis, and through the study and analysis of relevant enterprise data. The model is finally used to analyze the relationship between enterprise career maturity and career planning through simulation experiments. The research results prove that career maturity positively affects career planning, and vocational delay of gratification plays a mediating role between career maturity and career planning. Besides, the content of career choice in career maturity is influenced by mental acuity, result acuity, and loyalty. The experimental results indicate that when the load at both the edge and cloud ends exceeds 80%, the edge delay of the IoT analysis platform based on edge computing and cloud collaboration is 10 s lower than that of other models. Meanwhile, the system slowdown is reduced by 36% and the stability is increased when the IoT analysis platform analyzes data. The results of the edge-cloud collaboration scheduling scheme are similar to those of scheduling everything to the edge end, and it saves 19% of the time compared with scheduling everything to the cloud end. In Optical Character Recognition and Aeneas, compared with the single edge-cloud coordination mode, the model with the Nesterov Accelerated Gradient algorithm achieves the optimal performance. Specifically, the communication delay is reduced by about 25% on average, and the communication time is decreased by 61% compared with cloud computing to the edge end. This work has significant reference value for analyzing the relationship between enterprise psychology, behavior, and career planning.}, } @article {pmid34608413, year = {2021}, author = {Chao, G and Gang, W}, title = {Sports Training Teaching Device Based on Big Data and Cloud Computing.}, journal = {Journal of healthcare engineering}, volume = {2021}, number = {}, pages = {7339486}, pmid = {34608413}, issn = {2040-2309}, mesh = {*Big Data ; *Cloud Computing ; Humans ; Research Design ; }, abstract = {With the advent of the era of big data (BD), people have higher requirements for information, knowledge, and technology. Taking the Internet as the carrier, the use of cloud computing technology for distance education has become a trend. Our country's physical training teaching has also begun to shift from the traditional mode to a modern one. In order to improve the overall quality of our country's national sports, this paper studies a teaching device for sports training based on BD and cloud computing.
This article mainly uses questionnaire surveys, experimental analysis, and statistical data analysis to develop an in-depth understanding of the research theme, and uses swimming as an example to design the sports training device. In the survey, 52% of respondents considered water in the ears and itching during swimming to be the more serious problems. Based on this further understanding, an experimental design was carried out. Experimental studies have shown that the combination of BD and cloud computing can effectively solve the problems existing in the traditional teaching model, so as to achieve the goal of efficient and rapid development.}, } @article {pmid34604515, year = {2021}, author = {Khedr, AE and Idrees, AM and Salem, R}, title = {Enhancing the e-learning system based on a novel tasks' classification load-balancing algorithm.}, journal = {PeerJ. Computer science}, volume = {7}, number = {}, pages = {e669}, pmid = {34604515}, issn = {2376-5992}, abstract = {In the educational field, system performance, as well as stakeholders' satisfaction, is considered a bottleneck in e-learning systems due to the high number of users among the educational system's stakeholders, including instructors and students. On the other hand, successful resource utilization in cloud systems is one of the key factors for increasing system performance, which is strongly related to the ability to distribute load optimally. In this study, a novel load-balancing algorithm is proposed. The proposed algorithm aims to optimize the educational system's performance and, consequently, the satisfaction of its users, represented by the students. The proposed enhancement to the e-learning system has been evaluated by two methods: first, a simulation experiment confirming the applicability of the proposed algorithm; then, a real-case experiment applied to the e-learning system at Helwan University. The results revealed the advantages of the proposed algorithm over other well-known load-balancing algorithms. A questionnaire was also developed to measure users' satisfaction with the system's performance. A total of 3,670 out of 5,000 students responded, and the results revealed a satisfaction percentage of 95.4% in the e-learning field, as reported by the students.}, } @article {pmid34603926, year = {2021}, author = {Loeza-Mejía, CI and Sánchez-DelaCruz, E and Pozos-Parra, P and Landero-Hernández, LA}, title = {The potential and challenges of Health 4.0 to face COVID-19 pandemic: a rapid review.}, journal = {Health and technology}, volume = {11}, number = {6}, pages = {1321-1330}, pmid = {34603926}, issn = {2190-7188}, abstract = {The COVID-19 pandemic has generated the need to evolve health services to reduce the risk of contagion and promote a collaborative environment even remotely. Advances in Industry 4.0, including the internet of things, mobile networks, cloud computing, and artificial intelligence, make Health 4.0 possible, connecting patients with healthcare professionals. Hence, the focus of this work is analyzing the potential and challenges of state-of-the-art Health 4.0 applications to face the COVID-19 pandemic, including augmented environments, diagnosis of the virus, forecasts, medical robotics, and remote clinical services. It is concluded that Health 4.0 can be applied in the prevention of contagion, improve diagnosis, promote virtual learning environments, and offer remote services.
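For readers who want a concrete baseline for the load-balancing discussion in the Khedr et al. entry above (pmid34604515), the sketch below assigns tasks to the currently least-loaded virtual machine using a heap. It is a generic least-loaded baseline, not the paper's tasks'-classification algorithm; server and task names are hypothetical.

    # Least-loaded dispatch: a common baseline for cloud load balancing.
    import heapq

    def dispatch(tasks, n_servers=3):
        """Assign each (name, cost) task to the least-loaded server."""
        heap = [(0.0, f"vm-{i}") for i in range(n_servers)]  # (load, server)
        heapq.heapify(heap)
        placement = {}
        for name, cost in tasks:
            load, server = heapq.heappop(heap)   # server with the least load
            placement[name] = server
            heapq.heappush(heap, (load + cost, server))
        return placement

    tasks = [("quiz-grading", 4.0), ("video-transcode", 9.0),
             ("pdf-export", 2.0), ("forum-indexing", 5.0)]
    print(dispatch(tasks))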
However, there are still ethical, technical, security, and legal challenges to be addressed. Additionally, more imaging datasets for COVID-19 detection need to be made available to the scientific community. Working on these areas of opportunity will help health systems address the new normal. Likewise, Health 4.0 can be applied not only in the COVID-19 pandemic but also in future global outbreaks and natural disasters.}, } @article {pmid34597967, year = {2022}, author = {Nagel, GW and de Moraes Novo, EML and Martins, VS and Campos-Silva, JV and Barbosa, CCF and Bonnet, MP}, title = {Impacts of meander migration on the Amazon riverine communities using Landsat time series and cloud computing.}, journal = {The Science of the total environment}, volume = {806}, number = {Pt 2}, pages = {150449}, doi = {10.1016/j.scitotenv.2021.150449}, pmid = {34597967}, issn = {1879-1026}, mesh = {Animals ; *Cloud Computing ; *Ecosystem ; Models, Theoretical ; Rivers ; Time Factors ; }, abstract = {River meander migration is a process that maintains biodiverse riparian ecosystems by producing highly sinuous rivers and oxbow lakes. However, although floodplains support communities in the region through fishing and other practices, meandering rivers can directly affect the lives of local communities. For example, erosion of river banks promotes the loss of land on community shores, while sedimentation increases the distance from houses to the river. Therefore, communities living along the Juruá River, one of the most sinuous rivers on Earth, are vulnerable to long-term meander migration. In this study, river meander migration was detected using Landsat 5-8 data from 1984 to 2020. A per-pixel Water Surface Change Detection Algorithm (WSCDA) was developed to classify regions subject to erosion and sedimentation processes by applying temporal regressions to a water index, the Modified Normalized Difference Water Index (mNDWI). The WSCDA classified the meander migration with omission and commission errors lower than 13.44% and 7.08%, respectively. Then, the number of riparian communities was mapped using high-spatial-resolution SPOT images. A total of 369 communities with no road access were identified, the majority of which lie in stable regions (58.8%), followed by sedimentation (26.02%) and erosion (15.18%) areas. Furthermore, we identified that larger communities (>20 houses) tend to occupy more stable locations (70%) compared to smaller communities (1-10 houses, 55.6%). A theoretical model was proposed to illustrate the main impacts of meander migration on the communities, related to Inundation, Mobility Change, and Food Security.
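The per-pixel change-detection idea behind the WSCDA in the Nagel et al. entry above (pmid34597967) reduces to two steps: compute the mNDWI per scene, then fit a temporal trend per pixel. The NumPy sketch below uses synthetic reflectance stacks and illustrative thresholds in place of real Landsat bands.

    # Per-pixel water-index trend, in the spirit of the WSCDA.
    import numpy as np

    def mndwi(green, swir):
        """Modified Normalized Difference Water Index."""
        return (green - swir) / (green + swir + 1e-9)

    # Stack of T scenes, H x W pixels (synthetic stand-in for Landsat data).
    T, H, W = 36, 64, 64
    rng = np.random.default_rng(0)
    green = rng.uniform(0.05, 0.3, (T, H, W))
    swir = rng.uniform(0.05, 0.3, (T, H, W))
    index_stack = mndwi(green, swir)                  # shape (T, H, W)

    years = np.linspace(1984, 2020, T)
    x = years - years.mean()
    # Least-squares slope per pixel: cov(x, y) / var(x), vectorized over H x W.
    slope = (np.tensordot(x, index_stack - index_stack.mean(axis=0), axes=(0, 0))
             / (x ** 2).sum())

    erosion = slope > 0.002        # water index rising: channel approaching
    sedimentation = slope < -0.002 # water index falling: channel receding
    print(erosion.sum(), sedimentation.sum(), "pixels flagged")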
This is the first study exploring the relationship between meander migration and riverine communities at watershed-level, and the results support the identification of vulnerable communities to improve local planning and floodplain conservation.}, } @article {pmid34596963, year = {2021}, author = {Nikam, RD and Lee, J and Choi, W and Banerjee, W and Kwak, M and Yadav, M and Hwang, H}, title = {Ionic Sieving Through One-Atom-Thick 2D Material Enables Analog Nonvolatile Memory for Neuromorphic Computing.}, journal = {Small (Weinheim an der Bergstrasse, Germany)}, volume = {17}, number = {44}, pages = {e2103543}, doi = {10.1002/smll.202103543}, pmid = {34596963}, issn = {1613-6829}, mesh = {*Electronics ; Ions ; }, abstract = {The first report on ion transport through atomic sieves of atomically thin 2D material is provided to solve critical limitations of electrochemical random-access memory (ECRAM) devices. Conventional ECRAMs have random and localized ion migration paths; as a result, the analog switching efficiency is inadequate to perform in-memory logic operations. Herein ion transport path scaled down to the one-atom-thick (≈0.33 nm) hexagonal boron nitride (hBN), and the ionic transport area is confined to a small pore (≈0.3 nm[2]) at the single-hexagonal ring. One-atom-thick hBN has ion-permeable pores at the center of each hexagonal ring due to weakened electron cloud and highly polarized B-N bond. The experimental evidence indicates that the activation energy barrier for H[+] ion transport through single-layer hBN is ≈0.51 eV. Benefiting from the controlled ionic sieving through single-layer hBN, the ECRAMs exhibit superior nonvolatile analog switching with good memory retention and high endurance. The proposed approach enables atomically thin 2D material as an ion transport layer to regulate the switching of various ECRAM devices for artificial synaptic electronics.}, } @article {pmid34595915, year = {2021}, author = {Arantes, PR and Polêto, MD and Pedebos, C and Ligabue-Braun, R}, title = {Making it Rain: Cloud-Based Molecular Simulations for Everyone.}, journal = {Journal of chemical information and modeling}, volume = {61}, number = {10}, pages = {4852-4856}, doi = {10.1021/acs.jcim.1c00998}, pmid = {34595915}, issn = {1549-960X}, mesh = {*Cloud Computing ; *Molecular Dynamics Simulation ; }, abstract = {We present a user-friendly front-end for running molecular dynamics (MD) simulations using the OpenMM toolkit on the Google Colab framework. Our goals are (1) to highlight the usage of a cloud-computing scheme for educational purposes for a hands-on approach when learning MD simulations and (2) to exemplify how low-income research groups can perform MD simulations in the microsecond time scale. We hope this work facilitates teaching and learning of molecular simulation throughout the community.}, } @article {pmid34591938, year = {2021}, author = {Yang, X and Xi, W and Chen, A and Wang, C}, title = {An environmental monitoring data sharing scheme based on attribute encryption in cloud-fog computing.}, journal = {PloS one}, volume = {16}, number = {9}, pages = {e0258062}, pmid = {34591938}, issn = {1932-6203}, mesh = {*Cloud Computing ; *Computer Security ; Environmental Monitoring/*methods ; Information Dissemination ; Information Technology ; }, abstract = {Environmental monitoring plays a vital role in environmental protection, especially for the management and conservation of natural resources. 
However, environmental monitoring data usually cannot resist malicious attacks because it is transmitted over an open, insecure channel. In our paper, a new data sharing scheme is proposed by using attribute-based encryption, identity-based signature, and cloud computing technology to meet the requirements of confidentiality, integrity, verifiability, and unforgeability of environmental monitoring data. The monitoring equipment encrypts the monitored environmental data and uploads it to the environmental cloud server. Then, monitoring users can request access to the environmental cloud server. If the monitoring user meets the access policy, the plaintext is finally obtained through fog node decryption. Our proposal mainly uses attribute-based encryption technology to realize privacy protection and fine-grained access control of monitoring data. The integrity and unforgeability of the monitoring data are ensured by the digital signature. In addition, outsourced computing technology saves the computing overhead of monitoring equipment and monitoring users. The security analysis illustrates that our proposal achieves its security goals. Finally, the performance of our proposal and related schemes is evaluated in terms of communication overhead and computing overhead. The results indicate that our proposal is secure and efficient for environmental monitoring.}, } @article {pmid34591883, year = {2021}, author = {Shi, W}, title = {Analyzing enterprise asset structure and profitability using cloud computing and strategic management accounting.}, journal = {PloS one}, volume = {16}, number = {9}, pages = {e0257826}, pmid = {34591883}, issn = {1932-6203}, mesh = {Accounting/*methods ; Cloud Computing ; Humans ; Industry/economics ; Investments/*economics ; }, abstract = {This study further explores the role of asset structure in enterprise profitability and analyzes the relationship between the two in detail. Taking the express industry as the research object, from the perspective of strategic management accounting, the study uses edge computing and related analysis tools to compare the financial and non-financial indicators of existing express enterprises. The study also discusses the differences between asset structure allocation and sustainable profitability, and constructs a corresponding analysis framework. The results reveal that SF's total assets are large and its profit margin increases, while the total assets of other express enterprises are small and their express revenue drops sharply. Heavy assets can improve enterprises' profitability to a certain extent. SF has good asset management ability. With the support of the capital market, SF's net asset growth ability has been greatly improved. The edge computing method used has higher local data processing ability, and the analysis framework has higher performance than the big data processing method. The study can provide research ideas and practical value for the asset structure analysis and profitability evaluation of express enterprises.}, } @article {pmid34580553, year = {2022}, author = {Ullah, A and Nawi, NM and Ouhame, S}, title = {Recent advancement in VM task allocation system for cloud computing: review from 2015 to 2021.}, journal = {Artificial intelligence review}, volume = {55}, number = {3}, pages = {2529-2573}, pmid = {34580553}, issn = {0269-2821}, abstract = {Cloud computing is a new technology that has considerably changed many aspects of human life over the last decade.
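The Yang et al. scheme above (pmid34591938) relies on attribute-based encryption plus an identity-based signature; neither is in the standard Python toolkit, so the sketch below substitutes symmetric Fernet encryption with an explicit attribute check, purely to illustrate the encrypt, upload, and policy-gated decrypt flow. It is emphatically not attribute-based encryption; the policy and record names are hypothetical.

    # Simplified stand-in for the encrypt -> upload -> gated-decrypt flow.
    from cryptography.fernet import Fernet

    POLICY = {"role": "monitoring-user", "region": "basin-7"}  # hypothetical

    key = Fernet.generate_key()      # in real ABE, keys embed user attributes
    cloud_store = {}

    def sensor_upload(record_id, reading):
        # Monitoring equipment encrypts before uploading to the cloud server.
        cloud_store[record_id] = Fernet(key).encrypt(reading)

    def user_download(record_id, attributes):
        # Only users whose attributes satisfy the policy may decrypt.
        if any(attributes.get(k) != v for k, v in POLICY.items()):
            raise PermissionError("attributes do not satisfy the access policy")
        return Fernet(key).decrypt(cloud_store[record_id])

    sensor_upload("pm25-0421", b"PM2.5=12.4 ug/m3")
    print(user_download("pm25-0421",
                        {"role": "monitoring-user", "region": "basin-7"}))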
Especially after the COVID-19 pandemic, almost all daily activities have shifted to a cloud basis. Cloud computing is a utility in which different hardware and software resources are accessed on a pay-per-use basis. Most of these resources are available in virtualized form, and the virtual machine (VM) is one of the main elements of virtualization. VMs are used in data centers to distribute resources and applications according to consumer demand. Cloud data centers face various issues with respect to performance and efficiency, and different approaches are used to address them. Virtual machines play an important role in improving data center performance; therefore, different approaches are used to improve VM efficiency, i.e., load balancing of resources and tasks. To this end, various VM parameters are improved, such as makespan, quality of service, energy, data accuracy, and network utilization. Improving these parameters directly improves the performance of cloud computing. Therefore, we conducted this review to discuss the various improvements made to VMs from 2015 to 2021. This review also contains information about various cloud computing parameters, and the final section presents the role of machine learning algorithms in VMs, as well as load-balancing approaches, along with future directions for VMs in cloud data centers.}, } @article {pmid34577655, year = {2021}, author = {Sun, M and Bao, T and Xie, D and Lv, H and Si, G}, title = {Towards Application-Driven Task Offloading in Edge Computing Based on Deep Reinforcement Learning.}, journal = {Micromachines}, volume = {12}, number = {9}, pages = {}, pmid = {34577655}, issn = {2072-666X}, abstract = {Edge computing is a new paradigm, which provides storage, computing, and network resources between the traditional cloud data center and terminal devices. In this paper, we concentrate on the application-driven task offloading problem in edge computing by considering the strong dependencies of sub-tasks for multiple users. Our objective is to jointly optimize the total delay and energy generated by applications, while guaranteeing users' quality of service. First, we formulate the problem for application-driven tasks in edge computing by jointly considering the delays and the energy consumption. Based on that, we propose a novel Application-driven Task Offloading Strategy (ATOS) based on deep reinforcement learning, adding a preliminary sorting mechanism to realize the joint optimization. Specifically, we analyze the characteristics of application-driven tasks and propose a heuristic algorithm that introduces a new factor to determine the processing order of parallel sub-tasks. Finally, extensive experiments validate the effectiveness and reliability of the proposed algorithm.
To be specific, compared with the baseline strategies, the total cost reduction by ATOS can be up to 64.5% on average.}, } @article {pmid34577465, year = {2021}, author = {Molnár, S and Kelényi, B and Tamas, L}, title = {Feature Pyramid Network Based Efficient Normal Estimation and Filtering for Time-of-Flight Depth Cameras.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {18}, pages = {}, pmid = {34577465}, issn = {1424-8220}, support = {PN-III-P2-2.1-PTE-2019-0367//Unitatea Executiva pentru Finantarea Invatamantului Superior, a Cercetarii, Dezvoltarii si Inovarii/ ; }, mesh = {*Algorithms ; *Cloud Computing ; }, abstract = {In this paper, an efficient normal estimation and filtering method for depth images acquired by Time-of-Flight (ToF) cameras is proposed. The method is based on a common feature pyramid networks (FPN) architecture. The normal estimation method is called ToFNest, and the filtering method ToFClean. Both of these low-level 3D point cloud processing methods start from the 2D depth images, projecting the measured data into the 3D space and computing a task-specific loss function. Despite the simplicity, the methods prove to be efficient in terms of robustness and runtime. In order to validate the methods, extensive evaluations on public and custom datasets were performed. Compared with the state-of-the-art methods, the ToFNest and ToFClean algorithms are faster by an order of magnitude without losing precision on public datasets.}, } @article {pmid34577460, year = {2021}, author = {Nguyen, TA and Fe, I and Brito, C and Kaliappan, VK and Choi, E and Min, D and Lee, JW and Silva, FA}, title = {Performability Evaluation of Load Balancing and Fail-over Strategies for Medical Information Systems with Edge/Fog Computing Using Stochastic Reward Nets.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {18}, pages = {}, pmid = {34577460}, issn = {1424-8220}, support = {309335/2017-5//Brazilian National Council for Scientific and Technological Development - CNPq/ ; 2020R1A6A1A03046811//Basic Science Research Program through the National Research Foundation of Korea(NRF) funded by the Ministry of Education/ ; }, mesh = {*Cloud Computing ; Computer Simulation ; *Computer Systems ; Humans ; Information Systems ; Reward ; }, abstract = {The aggressive waves of ongoing world-wide virus pandemics urge us to conduct further studies on the performability of local computing infrastructures at hospitals/medical centers to provide a high level of assurance and trustworthiness of medical services and treatment to patients, and to help diminish the burden and chaos of medical management and operations. Previous studies contributed tremendous progress on the dependability quantification of existing computing paradigms (e.g., cloud, grid computing) at remote data centers, while a few works investigated the performance of provided medical services under the constraints of operational availability of devices and systems at local medical centers. Therefore, it is critical to rapidly develop appropriate models to quantify the operational metrics of medical services provided and sustained by medical information systems (MIS) even before practical implementation. In this paper, we propose a comprehensive performability SRN model of an edge/fog based MIS for the performability quantification of medical data transaction and services in local hospitals or medical centers. 
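The preliminary sorting step over dependent sub-tasks that ATOS adds (pmid34577655, concluded above) has a natural starting point in a topological ordering of the sub-task graph; Python's standard graphlib (3.9+) provides this directly. The application graph and sub-task names below are hypothetical.

    # Dependency-aware ordering of sub-tasks before offloading.
    from graphlib import TopologicalSorter  # Python 3.9+

    # sub-task -> set of sub-tasks it depends on
    app = {
        "decode": set(),
        "detect": {"decode"},
        "track": {"detect"},
        "render": {"decode", "track"},
    }
    print(list(TopologicalSorter(app).static_order()))
    # e.g. ['decode', 'detect', 'track', 'render']

    # Parallelizable groups: sub-tasks whose dependencies are already done can
    # be offloaded concurrently; this is where a priority factor would apply.
    ts = TopologicalSorter(app)
    ts.prepare()
    while ts.is_active():
        ready = list(ts.get_ready())
        print("offload together:", ready)
        ts.done(*ready)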
The model elaborates different failure modes of fog nodes and their VMs under the implementation of fail-over mechanisms. Sophisticated behaviors and dependencies between the performance and availability of data transactions are elaborated in a comprehensive manner when adopting three main load-balancing techniques, namely (i) probability-based, (ii) random-based, and (iii) shortest-queue-based approaches for medical data distribution from edge to fog layers, with and without fail-over mechanisms, in cases of component failures at the two levels of fog nodes and fog virtual machines (VMs). Different performability metrics of interest are analyzed, including (i) recovery token rate, (ii) mean response time, (iii) drop probability, (iv) throughput, and (v) queue utilization of network devices and fog nodes, to assess the impact of load-balancing techniques and fail-over mechanisms. Discrete-event simulation results highlight the effectiveness of combining these techniques for enhancing the performability of medical services provided by an MIS. Particularly, performability metrics of medical service continuity and quality are improved with fail-over mechanisms in the MIS, while load-balancing techniques help to enhance system performance metrics. Implementing load-balancing techniques along with fail-over mechanisms provides better performability metrics compared to the separate cases. The harmony of the integrated strategies eventually provides trustworthy medical services at a high level of performability. This study can help improve the design of MIS systems integrated with different load-balancing techniques and fail-over mechanisms to maintain continuous performance under the availability constraints of medical services with heavy computing workloads in local hospitals/medical centers, and to combat new waves of virus pandemics.}, } @article {pmid34577450, year = {2021}, author = {Gendreau Chakarov, A and Biddy, Q and Hennessy Elliott, C and Recker, M}, title = {The Data Sensor Hub (DaSH): A Physical Computing System to Support Middle School Inquiry Science Instruction.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {18}, pages = {}, pmid = {34577450}, issn = {1424-8220}, support = {1742053//National Science Foundation/ ; 1742046//National Science Foundation/ ; 2019805//National Science Foundation/ ; 220020587//James S. McDonald Foundation/ ; }, mesh = {Humans ; Schools ; *Science ; Students ; Writing ; }, abstract = {This article describes a sensor-based physical computing system, called the Data Sensor Hub (DaSH), which enables students to process, analyze, and display data streams collected using a variety of sensors. The system is built around the portable and affordable BBC micro:bit microcontroller (expanded with the gator:bit), which students program using a visual, cloud-based programming environment intended for novices. Students connect a variety of sensors (measuring temperature, humidity, carbon dioxide, sound, acceleration, magnetism, etc.) and write programs to analyze and visualize the collected sensor data streams. The article also describes two instructional units intended for middle grade science classes that use this sensor-based system. These inquiry-oriented units engage students in designing the system to collect data from the world around them to investigate scientific phenomena of interest.
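The three data-distribution policies compared in the Nguyen et al. entry above (pmid34577460) can each be written as a one-line dispatch rule from an edge gateway to fog nodes. The queue states and weights below are illustrative assumptions, not the paper's SRN parameters.

    # Probability-based, random-based, and shortest-queue dispatch rules.
    import random

    queues = {"fog-1": 3, "fog-2": 7, "fog-3": 1}   # pending transactions
    weights = {"fog-1": 0.5, "fog-2": 0.2, "fog-3": 0.3}

    def probability_based():
        return random.choices(list(weights), weights=list(weights.values()))[0]

    def random_based():
        return random.choice(list(queues))

    def shortest_queue():
        return min(queues, key=queues.get)

    for policy in (probability_based, random_based, shortest_queue):
        node = policy()
        queues[node] += 1                            # enqueue the transaction
        print(f"{policy.__name__:18s} -> {node}")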
The units are designed to help students develop the ability to meaningfully integrate computing as they engage in place-based learning activities while using tools that more closely approximate the practices of contemporary scientists as well as other STEM workers. Finally, the article articulates how the DaSH and units have elicited different kinds of teacher practices using student drawn modeling activities, facilitating debugging practices, and developing place-based science practices.}, } @article {pmid34577425, year = {2021}, author = {Corches, C and Daraban, M and Miclea, L}, title = {Availability of an RFID Object-Identification System in IoT Environments.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {18}, pages = {}, pmid = {34577425}, issn = {1424-8220}, mesh = {Cloud Computing ; Humans ; *Internet of Things ; *Radio Frequency Identification Device ; Technology ; }, abstract = {Through the latest technological and conceptual developments, the centralized cloud-computing approach has moved to structures such as edge, fog, and the Internet of Things (IoT), approaching end users. As mobile network operators (MNOs) implement the new 5G standards, enterprise computing function shifts to the edge. In parallel to interconnection topics, there is the issue of global impact over the environment. The idea is to develop IoT devices to eliminate the greenhouse effect of current applications. Radio-frequency identification (RFID) is the technology that has this potential, and it can be used in applications ranging from identifying a person to granting access in a building. Past studies have focused on how to improve RFID communication or to achieve maximal throughput. However, for many applications, system latency and availability are critical aspects. This paper examines, through stochastic Petri nets (SPNs), the availability, dependability, and latency of an object-identification system that uses RFID tags. Through the performed analysis, the optimal balance between latency and throughput was identified. Analyzing multiple communication scenarios revealed the availability of such a system when deployed at the edge layer.}, } @article {pmid34577416, year = {2021}, author = {Chen, X and Xiao, S}, title = {Multi-Objective and Parallel Particle Swarm Optimization Algorithm for Container-Based Microservice Scheduling.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {18}, pages = {}, pmid = {34577416}, issn = {1424-8220}, abstract = {An application based on a microservice architecture with a set of independent, fine-grained modular services is desirable, due to its low management cost, simple deployment, and high portability. This type of container technology has been widely used in cloud computing. Several methods have been applied to container-based microservice scheduling, but they come with significant disadvantages, such as high network transmission overhead, ineffective load balancing, and low service reliability. In order to overcome these disadvantages, in this study, we present a multi-objective optimization problem for container-based microservice scheduling. Our approach is based on the particle swarm optimization algorithm, combined parallel computing, and Pareto-optimal theory. The particle swarm optimization algorithm has fast convergence speed, fewer parameters, and many other advantages. First, we detail the various resources of the physical nodes, cluster, local load balancing, failure rate, and other aspects. 
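To give a flavor of the programs students write on the DaSH's BBC micro:bit (pmid34577450, concluded above), the MicroPython fragment below samples an on-board sensor, smooths it over a sliding window, and displays the stream. It runs on the device rather than CPython; the window size and sampling rate are arbitrary choices.

    # micro:bit MicroPython: sample, smooth, and display a sensor stream.
    from microbit import display, sleep, temperature

    readings = []
    while True:
        readings.append(temperature())    # on-board sensor, degrees C
        if len(readings) > 10:
            readings.pop(0)               # keep a 10-sample sliding window
        avg = sum(readings) / len(readings)
        display.scroll(str(round(avg)))   # show the smoothed value
        sleep(1000)                       # one sample per second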
Then, we discuss our improvement with respect to the relevant parameters. Second, we create a multi-objective optimization model and use a multi-objective optimization parallel particle swarm optimization algorithm for container-based microservice scheduling (MOPPSO-CMS). This algorithm is based on user needs and can effectively balance the performance of the cluster. After comparative experiments, we found that the algorithm can achieve good results, in terms of load balancing, network transmission overhead, and optimization speed.}, } @article {pmid34577258, year = {2021}, author = {Belabed, T and Ramos Gomes da Silva, V and Quenon, A and Valderamma, C and Souani, C}, title = {A Novel Automate Python Edge-to-Edge: From Automated Generation on Cloud to User Application Deployment on Edge of Deep Neural Networks for Low Power IoT Systems FPGA-Based Acceleration.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {18}, pages = {}, pmid = {34577258}, issn = {1424-8220}, mesh = {Acceleration ; Computers ; *Neural Networks, Computer ; *Software ; }, abstract = {Deep Neural Networks (DNNs) deployment for IoT Edge applications requires strong skills in hardware and software. In this paper, a novel design framework fully automated for Edge applications is proposed to perform such a deployment on System-on-Chips. Based on a high-level Python interface that mimics the leading Deep Learning software frameworks, it offers an easy way to implement a hardware-accelerated DNN on an FPGA. To do this, our design methodology covers the three main phases: (a) customization: where the user specifies the optimizations needed on each DNN layer, (b) generation: the framework generates on the Cloud the necessary binaries for both FPGA and software parts, and (c) deployment: the SoC on the Edge receives the resulting files serving to program the FPGA and related Python libraries for user applications. Among the study cases, an optimized DNN for the MNIST database can speed up more than 60× a software version on the ZYNQ 7020 SoC and still consume less than 0.43W. A comparison with the state-of-the-art frameworks demonstrates that our methodology offers the best trade-off between throughput, power consumption, and system cost.}, } @article {pmid34577248, year = {2021}, author = {Li, H and An, Z and Zuo, S and Zhu, W and Zhang, Z and Zhang, S and Zhang, C and Song, W and Mao, Q and Mu, Y and Li, E and García, JDP}, title = {Artificial Intelligence-Enabled ECG Algorithm Based on Improved Residual Network for Wearable ECG.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {18}, pages = {}, pmid = {34577248}, issn = {1424-8220}, support = {61675154//National Natural Science Foundation of China/ ; 19YFZCSY00180//Tianjin Key Research and Development Program/ ; 18ZXJMTG00260//Tianjin Major Project for Civil-Military Integration of Science and Technology/ ; 20YDTPJC01380//Tianjin Science and Technology Program/ ; XB202007//Tianjin Municipal Special Foundation for Key Cultivation of China/ ; }, mesh = {Algorithms ; Artificial Intelligence ; *Atrial Fibrillation ; Electrocardiography ; Humans ; *Wearable Electronic Devices ; }, abstract = {Heart disease is the leading cause of death for men and women globally. The residual network (ResNet) evolution of electrocardiogram (ECG) technology has contributed to our understanding of cardiac physiology. We propose an artificial intelligence-enabled ECG algorithm based on an improved ResNet for a wearable ECG. 
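The particle swarm machinery that MOPPSO-CMS (pmid34577416, concluded above) extends to multiple objectives is compact enough to show whole. The sketch below minimizes a single toy objective with common textbook coefficients, which are assumptions rather than the paper's settings.

    # Minimal single-objective particle swarm optimization loop.
    import numpy as np

    def pso(f, dim=4, n_particles=20, iters=100, w=0.7, c1=1.5, c2=1.5, seed=0):
        rng = np.random.default_rng(seed)
        x = rng.uniform(-5, 5, (n_particles, dim))   # positions
        v = np.zeros_like(x)                         # velocities
        pbest, pbest_val = x.copy(), np.apply_along_axis(f, 1, x)
        gbest = pbest[pbest_val.argmin()].copy()
        for _ in range(iters):
            r1, r2 = rng.random(x.shape), rng.random(x.shape)
            # Inertia + cognitive pull (pbest) + social pull (gbest).
            v = w * v + c1 * r1 * (pbest - x) + c2 * r2 * (gbest - x)
            x = x + v
            vals = np.apply_along_axis(f, 1, x)
            better = vals < pbest_val
            pbest[better], pbest_val[better] = x[better], vals[better]
            gbest = pbest[pbest_val.argmin()].copy()
        return gbest, pbest_val.min()

    best, val = pso(lambda z: float((z ** 2).sum()))
    print(best.round(4), round(val, 6))   # converges toward the origin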
The system hardware consists of a wearable ECG with conductive fabric electrodes, a wireless ECG acquisition module, a mobile terminal App, and a cloud diagnostic platform. The algorithm adopted in this study is based on an improved ResNet for the rapid classification of different types of arrhythmia. First, we visualize ECG data and convert one-dimensional ECG signals into two-dimensional images using Gramian angular fields. Then, we improve the ResNet-50 network model, add multistage shortcut branches to the network, and optimize the residual block. The ReLu activation function is replaced by a scaled exponential linear units (SELUs) activation function to improve the expression ability of the model. Finally, the images are input into the improved ResNet network for classification. The average recognition rate of this classification algorithm against seven types of arrhythmia signals (atrial fibrillation, atrial premature beat, ventricular premature beat, normal beat, ventricular tachycardia, atrial tachycardia, and sinus bradycardia) is 98.3%.}, } @article {pmid34574966, year = {2021}, author = {da Fonseca, MH and Kovaleski, F and Picinin, CT and Pedroso, B and Rubbo, P}, title = {E-Health Practices and Technologies: A Systematic Review from 2014 to 2019.}, journal = {Healthcare (Basel, Switzerland)}, volume = {9}, number = {9}, pages = {}, pmid = {34574966}, issn = {2227-9032}, support = {0001//Coordenação de Aperfeiçoamento de Pessoal de Nível Superior/ ; 0001//Fundação Araucária/ ; }, abstract = {E-health can be defined as a set of technologies applied with the help of the internet, in which healthcare services are provided to improve quality of life and facilitate healthcare delivery. As there is a lack of similar studies on the topic, this analysis uses a systematic literature review of articles published from 2014 to 2019 to identify the most common e-health practices used worldwide, as well as the main services provided, diseases treated, and the associated technologies that assist in e-health practices. Some of the key results were the identification of the four most common practices used (mhealth or mobile health; telehealth or telemedicine; technology; and others) and the most widely used technologies associated with e-health (IoT, cloud computing, Big Data, security, and systems).}, } @article {pmid34574593, year = {2021}, author = {Oh, SR and Seo, YD and Lee, E and Kim, YG}, title = {A Comprehensive Survey on Security and Privacy for Electronic Health Data.}, journal = {International journal of environmental research and public health}, volume = {18}, number = {18}, pages = {}, pmid = {34574593}, issn = {1660-4601}, mesh = {Cloud Computing ; *Computer Security ; Delivery of Health Care ; Electronic Health Records ; *Privacy ; }, abstract = {Recently, the integration of state-of-the-art technologies, such as modern sensors, networks, and cloud computing, has revolutionized the conventional healthcare system. However, security concerns have increasingly been emerging due to the integration of technologies. Therefore, the security and privacy issues associated with e-health data must be properly explored. In this paper, to investigate the security and privacy of e-health systems, we identified major components of the modern e-health systems (i.e., e-health data, medical devices, medical networks and edge/fog/cloud). Then, we reviewed recent security and privacy studies that focus on each component of the e-health systems. 
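The image-encoding step in the wearable-ECG entry above (pmid34577248), converting a 1-D signal into a 2-D Gramian angular (summation) field, takes only a few lines of NumPy. The input beat here is synthetic, standing in for a real ECG segment.

    # Gramian angular summation field (GASF) of a 1-D signal.
    import numpy as np

    def gasf(series):
        s = np.asarray(series, dtype=float)
        # Rescale to [-1, 1] so the arccos polar encoding is defined.
        s = 2 * (s - s.min()) / (s.max() - s.min() + 1e-12) - 1
        phi = np.arccos(np.clip(s, -1, 1))
        return np.cos(phi[:, None] + phi[None, :])   # cos(phi_i + phi_j)

    beat = np.sin(np.linspace(0, 2 * np.pi, 128))    # synthetic "ECG beat"
    image = gasf(beat)
    print(image.shape)                               # (128, 128) CNN input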
Based on the review, we derived a research taxonomy, security concerns, requirements, solutions, research trends, and open challenges for each component, along with the strengths and weaknesses of the analyzed studies. In particular, edge and fog computing studies for e-health security and privacy were reviewed, since such studies have mostly not been analyzed in other survey papers.}, } @article {pmid34573662, year = {2021}, author = {Dineva, K and Atanasova, T}, title = {Design of Scalable IoT Architecture Based on AWS for Smart Livestock.}, journal = {Animals : an open access journal from MDPI}, volume = {11}, number = {9}, pages = {}, pmid = {34573662}, issn = {2076-2615}, abstract = {In the ecological future of the planet, intelligent agriculture relies on CPS and IoT to free up human resources and increase production efficiency. Due to the growing number of connected IoT devices, the maximum scalability capacity and available computing power of existing architectural frameworks will be reached. This necessitates finding a solution that meets the continuously growing demands of smart farming. Cloud-based IoT solutions are achieving increasingly high popularity. The aim of this study was to design a scalable cloud-based architecture for a smart livestock monitoring system following Agile methodology and featuring environmental monitoring, health, growth, behaviour, reproduction, emotional state, and stress levels of animals. The AWS services used and their specific tasks within the proposed architecture are explained in detail. A stress test was performed to prove the data ingesting and processing capability of the proposed architecture. Experimental results proved that the proposed architecture, using AWS automated scaling mechanisms and IoT devices, is fully capable of processing the growing amount of data, which in turn allows the required needs of the constantly expanding number of CPS systems to be met.}, } @article {pmid34566264, year = {2023}, author = {Kumar, R and Al-Turjman, F and Srinivas, LNB and Braveen, M and Ramakrishnan, J}, title = {ANFIS for prediction of epidemic peak and infected cases for COVID-19 in India.}, journal = {Neural computing & applications}, volume = {35}, number = {10}, pages = {7207-7220}, pmid = {34566264}, issn = {0941-0643}, abstract = {Coronavirus Disease 2019 (COVID-19) is a continuing, extensive global incident affecting several million people's health and sometimes leading to death. Predicting the outbreak and taking cautious steps are the only ways to prevent the spread of COVID-19. This paper presents an Adaptive Neuro-Fuzzy Inference System (ANFIS)-based machine learning technique to predict the possible outbreak in India. The proposed ANFIS-based prediction system tracks the growth of the epidemic based on previous datasets fetched from cloud computing. The proposed ANFIS technique predicts the epidemic peak and COVID-19 infected cases through the cloud datasets. ANFIS was chosen for this study as it captures both numerical and linguistic knowledge and has the ability to classify data and identify patterns. The proposed technique not only predicts the outbreak but also tracks the disease and suggests a measurable policy to manage the COVID-19 epidemic. The obtained predictions show that the proposed technique effectively tracks the growth of the COVID-19 epidemic: the growth of the infection rate decreases at the end of 2020, and the epidemic peak is delayed by 40-60 days.
The prediction results using the proposed ANFIS technique show a low Mean Square Error (MSE) of 1.184 × 10[-3] with an accuracy of 86%. The study provides important information for public health providers and the government to control the COVID-19 epidemic.}, } @article {pmid34566262, year = {2021}, author = {Rufino Henrique, PS and Prasad, R}, title = {6G Networks for Next Generation of Digital TV Beyond 2030.}, journal = {Wireless personal communications}, volume = {121}, number = {2}, pages = {1363-1378}, pmid = {34566262}, issn = {0929-6212}, abstract = {This paper proposes a novel QoS scheme over the future 6G wireless architecture to offer excellent Quality of Service (QoS) for the next generation of digital TV beyond 2030. During the last 20 years, the way society watches and consumes TV and cinema has changed radically. Over-The-Top content platforms based on cloud services were created, followed by a commercial video consumption model offering subscribers flexibility such as Video on Demand. Besides the new business model, the network infrastructure and wireless technologies also permitted the streaming of high-quality TV and film formats such as High Definition, followed by the latest widespread TV standard, Ultra-High-Definition TV. Mobile broadband services opened the possibility for consumers to watch TV or video content anywhere at any time. However, the network infrastructure needs continuous improvement, primarily when crises, like the coronavirus disease (COVID-19) pandemic, create immense network traffic congestion. The outcome of that congestion was a decrease in QoS for such multimedia services, impacting the user experience. Ever more power-hungry video applications are beginning to test the resilience of networks and the future roadmap of 5G and Beyond 5G (B5G). For this, 6G architecture planning must be focused on offering the ultimate QoS for prosumers beyond 2030.}, } @article {pmid34563896, year = {2021}, author = {Jennings, MR and Turner, C and Bond, RR and Kennedy, A and Thantilage, R and Kechadi, MT and Le-Khac, NA and McLaughlin, J and Finlay, DD}, title = {Code-free cloud computing service to facilitate rapid biomedical digital signal processing and algorithm development.}, journal = {Computer methods and programs in biomedicine}, volume = {211}, number = {}, pages = {106398}, doi = {10.1016/j.cmpb.2021.106398}, pmid = {34563896}, issn = {1872-7565}, mesh = {Algorithms ; *Cloud Computing ; Programming Languages ; Signal Processing, Computer-Assisted ; *Software ; }, abstract = {BACKGROUND AND OBJECTIVE: Cloud computing has the ability to offload processing tasks to remote computing resources. Presently, the majority of biomedical digital signal processing involves a ground-up approach by writing code in a variety of languages. This may reduce the time a researcher or health professional has to process data, while increasing the barrier to entry for those with little or no software development experience. In this study, we aim to provide a service capable of handling and processing biomedical data via a code-free interface. Furthermore, our solution should support multiple file formats and processing languages while saving user inputs for repeated use.

METHODS: A web interface via the Python-based Django framework was developed with the potential to shorten the time taken to create an algorithm, encourage code reuse, and democratise digital signal processing tasks for non-technical users using a code-free user interface. A user can upload data, create an algorithm and download the result. Using discrete functions and multi-lingual scripts (e.g. MATLAB or Python), the user can manipulate data rapidly in a repeatable manner. Multiple data file formats are supported by a decision-based file handler and user authentication-based storage allocation method.

RESULTS: The proposed system has been demonstrated as effective in handling multiple input data types in various programming languages, including Python and MATLAB. This, in turn, has the potential to reduce currently experienced bottlenecks in cross-platform development of bio-signal processing algorithms. The source code for this system has been made available to encourage reuse. A cloud service for digital signal processing has the ability to reduce the apparent complexity and abstract the need to understand the intricacies of signal processing.

CONCLUSION: We have introduced a web-based system capable of reducing the barrier to entry for inexperienced programmers. Furthermore, our system is reproducible and scalable for use in a variety of clinical or research fields.}, } @article {pmid34554331, year = {2021}, author = {Setiani, P and Devianto, LA and Ramdani, F}, title = {Rapid estimation of CO2 emissions from forest fire events using cloud-based computation of google earth engine.}, journal = {Environmental monitoring and assessment}, volume = {193}, number = {10}, pages = {669}, pmid = {34554331}, issn = {1573-2959}, mesh = {Carbon Dioxide/analysis ; Cloud Computing ; Environmental Monitoring ; *Fires ; Search Engine ; *Wildfires ; }, abstract = {Forest fires are one of the main sources of greenhouse gases, with carbon dioxide as the main constituent. With increasing global surface temperatures, the probability of forest fire events also increases. A method that enables rapid quantification of emissions is all the more necessary to estimate the environmental impact. This study introduces the application of the Google Earth Engine platform to monitor burned areas in forest fire events on Mount Arjuno, Indonesia, during the 2016-2019 period, using Landsat-8 and Sentinel-2 satellite imagery. The events particularly affected grassland and tropical forest areas, as well as a fraction of agricultural areas, with a total estimated emission of 2.5 × 10[3] tCO2/km[2] of burned area. Higher carbon dioxide emissions were also observed, consistent with the higher local surface temperature as well as the average CO total column mixing ratio retrieved from the Sentinel-5P Tropospheric Monitoring Instrument during the period of analysis.}, } @article {pmid34549196, year = {2021}, author = {Alharbi, A and Abdur Rahman, MD}, title = {Review of Recent Technologies for Tackling COVID-19.}, journal = {SN computer science}, volume = {2}, number = {6}, pages = {460}, pmid = {34549196}, issn = {2661-8907}, abstract = {The current pandemic caused by the COVID-19 virus requires more effort, experience, and science-sharing to overcome the damage caused by the pathogen. The fast and wide human-to-human transmission of the COVID-19 virus demands a significant role for the newest technologies in the form of local and global computing and information sharing, data privacy, and accurate tests. The advancements of deep neural networks, cloud computing solutions, blockchain technology, and beyond 5G (B5G) communication have contributed to the better management of the COVID-19 impacts on society. This paper reviews recent attempts to tackle the COVID-19 situation using these technological advancements.}, } @article {pmid36700091, year = {2021}, author = {Blum, BC and Emili, A}, title = {Omics Notebook: robust, reproducible and flexible automated multiomics exploratory analysis and reporting.}, journal = {Bioinformatics advances}, volume = {1}, number = {1}, pages = {vbab024}, pmid = {36700091}, issn = {2635-0041}, abstract = {SUMMARY: Mass spectrometry is an increasingly important tool for the global interrogation of diverse biomolecules. Unfortunately, the complexity of downstream data analysis is a major challenge for the routine use of these data by investigators from broader training backgrounds. Omics Notebook is an open-source framework for exploratory analysis, reporting and integrating multiomic data that is automated, reproducible and customizable.
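A decision-based, multi-language script runner is the natural core of the code-free service described in the Jennings et al. entry above (pmid34563896). The sketch below routes a processing step to an interpreter by file extension; the interpreter commands (e.g. MATLAB on the PATH with its R2019a+ -batch mode) and file names are assumptions, not the paper's implementation.

    # Dispatch a user-selected processing step to the right interpreter.
    import subprocess
    from pathlib import Path

    RUNNERS = {
        ".py": ["python"],
        ".m": ["matlab", "-batch"],   # assumes MATLAB R2019a+ on the PATH
        ".r": ["Rscript"],
    }

    def run_step(script_path, data_path):
        script = Path(script_path)
        cmd = RUNNERS.get(script.suffix.lower())
        if cmd is None:
            raise ValueError(f"unsupported script type: {script.suffix}")
        if cmd[0] == "matlab":
            # -batch takes a MATLAB command, so call the function by name.
            args = cmd + [f"{script.stem}('{data_path}')"]
        else:
            args = cmd + [str(script), str(data_path)]
        return subprocess.run(args, capture_output=True, text=True,
                              check=True).stdout

    # e.g. run_step("filters/bandpass.py", "uploads/ecg_0001.csv")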
Built-in functions allow the processing of proteomic data from MaxQuant and metabolomic data from XCMS, along with other omics data in standardized input formats as specified in the documentation. In addition, the use of containerization manages R package installation requirements and is tailored for shared high-performance computing or cloud environments.

Omics Notebook is implemented in Python and R and is available for download from https://github.com/cnsb-boston/Omics_Notebook with additional documentation under a GNU GPLv3 license.

SUPPLEMENTARY INFORMATION: Supplementary data are available at Bioinformatics Advances online.}, } @article {pmid34543209, year = {2022}, author = {Biasi, LD and Citarella, AA and Risi, M and Tortora, G}, title = {A Cloud Approach for Melanoma Detection Based on Deep Learning Networks.}, journal = {IEEE journal of biomedical and health informatics}, volume = {26}, number = {3}, pages = {962-972}, doi = {10.1109/JBHI.2021.3113609}, pmid = {34543209}, issn = {2168-2208}, mesh = {*Deep Learning ; Humans ; Machine Learning ; *Melanoma/diagnostic imaging ; Neural Networks, Computer ; }, abstract = {In the era of digitized images, the goal is to extract information from them and create new knowledge thanks to Computer Vision techniques, Machine Learning and Deep Learning. This enables the use of images for the early diagnosis and subsequent treatment of a wide range of diseases. In the dermatological field, deep neural networks are used to distinguish between melanoma and non-melanoma images. In this paper, we have underlined two essential points of melanoma detection research. The first aspect considered is how even a simple modification of the parameters in the dataset determines a change in the accuracy of classifiers. In this case, we investigated Transfer Learning issues. Following the results of this first analysis, we suggest that continuous training-test iterations are needed to provide robust prediction models. The second point is the need for a more flexible system architecture that can handle changes in the training datasets. In this context, we proposed the development and implementation of a hybrid architecture based on Cloud, Fog and Edge Computing to provide a Melanoma Detection service based on clinical and dermoscopic images. At the same time, this architecture must deal with the amount of data to be analyzed by reducing the running time of continuous retraining. This has been highlighted with experiments carried out on a single machine and on different distributed systems, showing how a distributed approach delivers output in a much shorter time.}, } @article {pmid34541313, year = {2021}, author = {Qawqzeh, Y and Alharbi, MT and Jaradat, A and Abdul Sattar, KN}, title = {A review of swarm intelligence algorithms deployment for scheduling and optimization in cloud computing environments.}, journal = {PeerJ. Computer science}, volume = {7}, number = {}, pages = {e696}, pmid = {34541313}, issn = {2376-5992}, abstract = {BACKGROUND: This review focuses on the recent publications on swarm intelligence algorithms (particle swarm optimization (PSO), ant colony optimization (ACO), artificial bee colony (ABC), and the firefly algorithm (FA)) in scheduling and optimization problems. Swarm intelligence (SI) can be described as the intelligent behavior of natural living beings such as animals, fish, and insects. In fact, it is based on groups or populations of agents that maintain reliable connections among themselves and with their environment. Inside such a group or population, each agent (member) performs according to certain rules that make it capable of maximizing the overall utility of that group or population. It can be described as a collective intelligence among self-organized members of a group or population. In fact, biology inspired many researchers to mimic the behavior of certain natural swarms (birds, animals, or insects) to solve some computational problems effectively.
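The Transfer Learning aspect examined in the Biasi et al. entry above (pmid34543209) typically looks like the Keras sketch below: freeze a pretrained backbone and retrain only a small melanoma/non-melanoma head. The input size and head layers are illustrative choices, not the paper's configuration; retraining only the head is also what keeps a continuous-retrain loop cheap in a cloud/fog/edge setting.

    # Transfer learning: frozen pretrained backbone, small trainable head.
    import tensorflow as tf

    base = tf.keras.applications.ResNet50(weights="imagenet", include_top=False,
                                          input_shape=(224, 224, 3))
    base.trainable = False                   # freeze the pretrained features

    model = tf.keras.Sequential([
        base,
        tf.keras.layers.GlobalAveragePooling2D(),
        tf.keras.layers.Dropout(0.3),
        tf.keras.layers.Dense(1, activation="sigmoid"),  # melanoma probability
    ])
    model.compile(optimizer=tf.keras.optimizers.Adam(1e-4),
                  loss="binary_crossentropy", metrics=["accuracy"])
    model.summary()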

METHODOLOGY: SI techniques have been utilized in cloud computing environments in search of optimal scheduling strategies. Hence, the most recent publications (2015-2021) on SI algorithms are reviewed and summarized.

RESULTS: It is clear that the number of algorithms for cloud computing optimization is increasing rapidly. The number of PSO-, ACO-, ABC-, and FA-related journal papers has visibly increased. However, it is noticeable that many recently emerging algorithms are based on amendments to the original SI algorithms, especially the PSO algorithm.

CONCLUSIONS: The major intention of this work is to motivate interested researchers to develop and innovate new SI-based solutions that can handle complex and multi-objective computational problems.}, } @article {pmid34541307, year = {2021}, author = {Ali, O and Ishak, MK and Bhatti, MKL}, title = {Emerging IoT domains, current standings and open research challenges: a review.}, journal = {PeerJ. Computer science}, volume = {7}, number = {}, pages = {e659}, pmid = {34541307}, issn = {2376-5992}, abstract = {Over the last decade, the Internet of Things (IoT) domain has grown dramatically, from ultra-low-power hardware design to cloud-based solutions, and now, with the rise of 5G technology, a new horizon for edge computing on IoT devices will be introduced. A wide range of communication technologies has steadily evolved in recent years, representing a diverse range of domain areas and communication specifications. Because of the heterogeneity of technology and interconnectivity, the true realisation of the IoT ecosystem is currently hampered by multiple dynamic integration challenges. In this context, several emerging IoT domains necessitate a complete re-modeling, design, and standardisation from the ground up in order to achieve seamless IoT ecosystem integration. The Internet of Nano-Things (IoNT), Internet of Space-Things (IoST), Internet of Underwater-Things (IoUT) and Social Internet of Things (SIoT) are investigated in this paper with a broad future scope based on their integration and ability to source other IoT domains by highlighting their application domains, state-of-the-art research, and open challenges. To the best of our knowledge, there is little or no information on the current state of these ecosystems, which is the motivating factor behind this article. Finally, the paper summarises the integration of these ecosystems with current IoT domains and suggests future directions for overcoming the challenges.}, } @article {pmid34531717, year = {2021}, author = {Fletcher, MD}, title = {Can Haptic Stimulation Enhance Music Perception in Hearing-Impaired Listeners?.}, journal = {Frontiers in neuroscience}, volume = {15}, number = {}, pages = {723877}, pmid = {34531717}, issn = {1662-4548}, abstract = {Cochlear implants (CIs) have been remarkably successful at restoring hearing in severely-to-profoundly hearing-impaired individuals. However, users often struggle to deconstruct complex auditory scenes with multiple simultaneous sounds, which can result in reduced music enjoyment and impaired speech understanding in background noise. Hearing aid users often have similar issues, though these are typically less acute. Several recent studies have shown that haptic stimulation can enhance CI listening by giving access to sound features that are poorly transmitted through the electrical CI signal. This "electro-haptic stimulation" improves melody recognition and pitch discrimination, as well as speech-in-noise performance and sound localization. The success of this approach suggests it could also enhance auditory perception in hearing-aid users and other hearing-impaired listeners. This review focuses on the use of haptic stimulation to enhance music perception in hearing-impaired listeners. Music is prevalent throughout everyday life, being critical to media such as film and video games, and often being central to events such as weddings and funerals. 
It represents the biggest challenge for signal processing, as it is typically an extremely complex acoustic signal, containing multiple simultaneous harmonic and inharmonic sounds. Signal-processing approaches developed for enhancing music perception could therefore have significant utility for other key issues faced by hearing-impaired listeners, such as understanding speech in noisy environments. This review first discusses the limits of music perception in hearing-impaired listeners and the limits of the tactile system. It then discusses the evidence on the integration of audio and haptic stimulation in the brain. Next, the features, suitability, and success of current haptic devices for enhancing music perception are reviewed, as well as the signal-processing approaches that could be deployed in future haptic devices. Finally, the cutting-edge technologies that could be exploited for enhancing music perception with haptics are discussed. These include the latest micro motor and driver technology, low-power wireless technology, machine learning, big data, and cloud computing. New approaches for enhancing music perception in hearing-impaired listeners could substantially improve quality of life. Furthermore, effective haptic techniques for providing complex sound information could offer a non-invasive, affordable means for enhancing listening more broadly in hearing-impaired individuals.}, } @article {pmid34529673, year = {2021}, author = {Andleeb, S and Abbasi, WA and Ghulam Mustafa, R and Islam, GU and Naseer, A and Shafique, I and Parween, A and Shaheen, B and Shafiq, M and Altaf, M and Ali Abbas, S}, title = {ESIDE: A computationally intelligent method to identify earthworm species (E. fetida) from digital images: Application in taxonomy.}, journal = {PloS one}, volume = {16}, number = {9}, pages = {e0255674}, pmid = {34529673}, issn = {1932-6203}, mesh = {Animals ; Computer Simulation ; Ecosystem ; Image Processing, Computer-Assisted/*methods ; *Machine Learning ; Oligochaeta/*classification/physiology ; Photography/*instrumentation ; }, abstract = {Earthworms (Crassiclitellata), being ecosystem engineers, significantly affect the physical, chemical, and biological properties of the soil by recycling organic material, increasing nutrient availability, and improving soil structure. The ecological efficiency of earthworms varies with species. Therefore, the role of taxonomy in earthworm study is significant. The taxonomy of earthworms cannot reliably be established through morphological characteristics because the small and simple body plan of the earthworm lacks anatomically complex and highly specialized structures. Recently, molecular techniques have been adopted to accurately classify earthworm species, but these techniques are time-consuming and costly. To combat this issue, in this study, we propose a machine learning-based earthworm species identification model that uses digital images of earthworms. We performed a stringent performance evaluation not only through 10-fold cross-validation and on an external validation dataset but also in real settings by involving an experienced taxonomist. In all the evaluation settings, our proposed model has given state-of-the-art performance and justified its use to aid earthworm taxonomy studies.
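The stringent evaluation described above centres on 10-fold cross-validation. As a hedged sketch of that protocol (the classifier and synthetic features below are stand-ins, not the ESIDE model or its image descriptors):

```python
from sklearn.datasets import make_classification
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import cross_val_score

# Stand-in data; ESIDE itself derives features from digital earthworm images.
X, y = make_classification(n_samples=300, n_features=20, random_state=0)

# 10-fold CV: train on 9 folds, score on the held-out fold, rotate ten times.
scores = cross_val_score(RandomForestClassifier(random_state=0), X, y, cv=10)
print(f"10-fold accuracy: {scores.mean():.3f} +/- {scores.std():.3f}")
```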
We made this model openly accessible through a cloud-based webserver and Python code available at https://sites.google.com/view/wajidarshad/software and https://github.com/wajidarshad/ESIDE.}, } @article {pmid34525685, year = {2022}, author = {Gxokwe, S and Dube, T and Mazvimavi, D}, title = {Leveraging Google Earth Engine platform to characterize and map small seasonal wetlands in the semi-arid environments of South Africa.}, journal = {The Science of the total environment}, volume = {803}, number = {}, pages = {150139}, doi = {10.1016/j.scitotenv.2021.150139}, pmid = {34525685}, issn = {1879-1026}, mesh = {Bayes Theorem ; Environmental Monitoring ; *Search Engine ; Seasons ; South Africa ; *Wetlands ; }, abstract = {Although significant scientific research strides have been made in mapping the spatial extents and ecohydrological dynamics of wetlands in semi-arid environments, the focus on small wetlands remains a challenge. This is due to the sensing characteristics of remote sensing platforms and the lack of robust data processing techniques. Advancements in data analytic tools, such as the introduction of the Google Earth Engine (GEE) platform, provide unique opportunities for improved assessment of small and scattered wetlands. This study thus assessed the capabilities of the GEE cloud-computing platform in characterising small seasonally flooded wetlands, using new-generation Sentinel-2 data from 2016 to 2020. Specifically, the study assessed the spectral separability of different land cover classes for two wetlands using Sentinel-2 multi-year composite water and vegetation indices, and identified the most suitable GEE machine learning algorithm for accurately detecting and mapping semi-arid seasonal wetlands. This was achieved using the object-based Random Forest (RF), Support Vector Machine (SVM), Classification and Regression Tree (CART), and Naïve Bayes (NB) algorithms in GEE. The results demonstrated the capabilities of the GEE platform to characterize wetlands with acceptable accuracy. All algorithms performed well in mapping the two wetlands except for the NB method, which had the lowest overall classification accuracy. These findings underscore the relevance of the GEE platform, Sentinel-2 data, and advanced algorithms in characterizing small and seasonal semi-arid wetlands.}, } @article {pmid34522068, year = {2023}, author = {Nasser, N and Emad-Ul-Haq, Q and Imran, M and Ali, A and Razzak, I and Al-Helali, A}, title = {A smart healthcare framework for detection and monitoring of COVID-19 using IoT and cloud computing.}, journal = {Neural computing & applications}, volume = {35}, number = {19}, pages = {13775-13789}, pmid = {34522068}, issn = {0941-0643}, abstract = {Coronavirus (COVID-19) is a very contagious infection that has drawn the world's attention. Modeling such diseases can be extremely valuable in predicting their effects. Although classic statistical modeling may provide adequate models, it may also fail to capture the data's intricacy. An automatic COVID-19 detection system based on computed tomography (CT) scan or X-ray images is effective, but a robust system design is challenging. In this study, we propose an intelligent healthcare system that integrates IoT-cloud technologies. This architecture uses smart connectivity sensors and deep learning (DL) for intelligent decision-making from the perspective of the smart city.
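For the Earth Engine workflow summarized in the wetland-mapping entry above, a minimal sketch with the GEE Python API might look as follows; the region, date range, band choices, training asset, and tree count are all illustrative assumptions, not the study's configuration:

```python
import ee
ee.Initialize()  # assumes prior Earth Engine authentication

# Sentinel-2 median composite over a hypothetical wetland area.
roi = ee.Geometry.Rectangle([29.0, -24.0, 29.2, -23.8])
s2 = (ee.ImageCollection('COPERNICUS/S2_SR')
        .filterBounds(roi)
        .filterDate('2016-01-01', '2020-12-31')
        .median())
ndvi = s2.normalizedDifference(['B8', 'B4']).rename('NDVI')   # vegetation index
ndwi = s2.normalizedDifference(['B3', 'B8']).rename('NDWI')   # water index
stack = s2.select(['B2', 'B3', 'B4', 'B8']).addBands([ndvi, ndwi])

# 'samples' would be a labelled FeatureCollection of land-cover points.
samples = ee.FeatureCollection('users/example/wetland_training')  # placeholder asset
training = stack.sampleRegions(collection=samples, properties=['class'], scale=10)
classifier = ee.Classifier.smileRandomForest(100).train(training, 'class', stack.bandNames())
classified = stack.classify(classifier)
```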
The intelligent system tracks the status of patients in real time and delivers reliable, timely, and high-quality healthcare facilities at a low cost. COVID-19 detection experiments are performed using DL to test the viability of the proposed system. We use a sensor for recording, transferring, and tracking healthcare data. CT scan images from patients are sent to the cloud by IoT sensors, where the cognitive module is stored. The system determines the patient's status by examining the CT scan images. The DL cognitive module makes the real-time decision on the possible course of action. When information is conveyed to the cognitive module, we use a state-of-the-art classification algorithm based on DL, i.e., ResNet50, to detect and classify whether the patients are normal or infected by COVID-19. We validate the proposed system's robustness and effectiveness using two benchmark publicly available datasets (the Covid-Chestxray dataset and the Chex-Pert dataset). First, a dataset of 6000 images is prepared from the above two datasets. The proposed system was trained on 80% of the images and tested on the remaining 20%. Cross-validation is performed using a tenfold cross-validation technique for performance evaluation. The results indicate that the proposed system gives an accuracy of 98.6%, a sensitivity of 97.3%, a specificity of 98.2%, and an F1-score of 97.87%. These results clearly show that the accuracy, specificity, sensitivity, and F1-score of our proposed method are high. The comparison shows that the proposed system performs better than existing state-of-the-art systems. The proposed system will be helpful in medical diagnosis research and healthcare systems. It will also support medical experts in COVID-19 screening and provide a valuable second opinion.}, } @article {pmid34518711, year = {2021}, author = {Sood, SK and Rawat, KS}, title = {A fog assisted intelligent framework based on cyber physical system for safe evacuation in panic situations.}, journal = {Computer communications}, volume = {178}, number = {}, pages = {297-306}, pmid = {34518711}, issn = {0140-3664}, abstract = {In the current scenario of the COVID-19 pandemic and worldwide health emergency, one of the major challenges is to identify and predict the panic health of persons. The management of panic health and on-time evacuation prevents COVID-19 infection incidences in educational institutions and public places. Therefore, a system is required that predicts infection and suggests a safe evacuation path to people, helping to control panic scenarios and mortality. In this paper, a fog-assisted cyber physical system is introduced to control panic attacks and COVID-19 infection risk in public places. The proposed model uses the concepts of physical space and cyberspace. The physical space supports real-time data collection and the transmission of generated alerts to stakeholders. Cyberspace consists of two spaces: fog space and cloud space. The fog space facilitates the determination of panic health and COVID-19 symptoms, with alert generation for risk-affected areas. The cloud space monitors and predicts the person's panic health and symptoms using the SARIMA model. Furthermore, it also identifies risk-prone regions in the affected place using Geographical Population Analysis. The performance evaluation confirms the efficiency of panic health determination and SARIMA-based prediction, together with the accuracy of risk mapping.
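Since the cloud space above relies on SARIMA forecasting, a minimal sketch of fitting such a model with statsmodels may be useful; the series, order, and seasonal order are illustrative, not the paper's fitted parameters:

```python
import numpy as np
from statsmodels.tsa.statespace.sarimax import SARIMAX

# Synthetic daily series with a weekly cycle, standing in for panic-health data.
rng = np.random.default_rng(0)
series = 10 + np.sin(np.arange(120) * 2 * np.pi / 7) + rng.normal(0, 0.5, 120)

# SARIMA(p,d,q)(P,D,Q,s): here a weekly seasonal period s=7 is assumed.
model = SARIMAX(series, order=(1, 0, 1), seasonal_order=(1, 0, 1, 7))
fit = model.fit(disp=False)
forecast = fit.forecast(steps=14)  # two-week-ahead prediction
print(forecast)
```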
The proposed system provides efficient, prioritized, on-time evacuation from risk-affected places, protecting people from panic attacks and infection caused by COVID-19.}, } @article {pmid34514787, year = {2021}, author = {Lin, Z and Zou, J and Liu, S and Peng, C and Li, Z and Wan, X and Fang, D and Yin, J and Gobbo, G and Chen, Y and Ma, J and Wen, S and Zhang, P and Yang, M}, title = {Correction to "A Cloud Computing Platform for Scalable Relative and Absolute Binding Free Energy Prediction: New Opportunities and Challenges for Drug Discovery".}, journal = {Journal of chemical information and modeling}, volume = {61}, number = {9}, pages = {4819}, doi = {10.1021/acs.jcim.1c00934}, pmid = {34514787}, issn = {1549-960X}, } @article {pmid34514378, year = {2021}, author = {Sang, GM and Xu, L and de Vrieze, P}, title = {A Predictive Maintenance Model for Flexible Manufacturing in the Context of Industry 4.0.}, journal = {Frontiers in big data}, volume = {4}, number = {}, pages = {663466}, pmid = {34514378}, issn = {2624-909X}, abstract = {The Industry 4.0 paradigm is the focus of modern manufacturing system design. The integration of cutting-edge technologies such as the Internet of things, cyber-physical systems, big data analytics, and cloud computing requires a flexible platform supporting the effective optimization of manufacturing-related processes, e.g., predictive maintenance. Existing predictive maintenance studies generally focus either on a predictive model that does not consider maintenance decisions, or on maintenance optimizations based on degradation models of a known system. To address this, we propose PMMI 4.0, a Predictive Maintenance Model for Industry 4.0, which utilizes a newly proposed solution, PMS4MMC, for supporting an optimized maintenance schedule plan for multiple machine components, driven by a data-driven LSTM model for RUL (remaining useful life) estimation. The effectiveness of the proposed solution is demonstrated using a real-world industrial case with related data. The results showed the validity and applicability of this work.}, } @article {pmid34512110, year = {2021}, author = {Ahmadi, Z and Haghi Kashani, M and Nikravan, M and Mahdipour, E}, title = {Fog-based healthcare systems: A systematic review.}, journal = {Multimedia tools and applications}, volume = {80}, number = {30}, pages = {36361-36400}, pmid = {34512110}, issn = {1380-7501}, abstract = {The healthcare system aims to provide a reliable and organized solution to enhance the health of human society. Studying patients' histories can help physicians to consider patients' needs when designing healthcare systems and offering services, which increases patient satisfaction. Healthcare is therefore becoming a growing, contested market. With this significant growth in healthcare systems, challenges such as huge data volumes, response time, latency, and security vulnerability arise. Therefore, fog computing, as a well-known distributed architecture, could help to solve such challenges. In fog computing architecture, processing components are placed between the end devices and cloud components, and they execute applications. This architecture is suitable for applications, such as healthcare systems, that need a real-time response and low latency. In this paper, a systematic review of available approaches in the field of fog-based healthcare systems is presented; the challenges of applying fog computing in healthcare are explored, classified, and discussed.
First, the fog computing approaches in healthcare are categorized into three main classes: communication, application, and resource/service. Then, they are discussed and compared based on their tools, evaluation methods, and evaluation metrics. Finally, based on these observations, some open issues and challenges are highlighted for further studies in fog-based healthcare.}, } @article {pmid34512108, year = {2021}, author = {Kolak, M and Li, X and Lin, Q and Wang, R and Menghaney, M and Yang, S and Anguiano, V}, title = {The US COVID Atlas: A dynamic cyberinfrastructure surveillance system for interactive exploration of the pandemic.}, journal = {Transactions in GIS : TG}, volume = {25}, number = {4}, pages = {1741-1765}, pmid = {34512108}, issn = {1361-1682}, support = {U2C DA050098/DA/NIDA NIH HHS/United States ; }, abstract = {Distributed spatial infrastructures leveraging cloud computing technologies can tackle issues of disparate data sources and address the need for data-driven knowledge discovery and more sophisticated spatial analysis central to the COVID-19 pandemic. We implement a new, open-source spatial middleware component (libgeoda) and system design to scale development quickly to effectively meet the need for surveilling county-level metrics in a rapidly changing pandemic landscape. We incorporate, wrangle, and analyze multiple data streams from volunteered and crowdsourced environments to leverage multiple data perspectives. We integrate explorative spatial data analysis (ESDA) and statistical hotspot standards to detect infectious disease clusters in real time, building on decades of research in GIScience and spatial statistics. We scale the computational infrastructure to provide equitable access to data and insights across the entire USA, demanding a basic but high-quality standard of ESDA techniques. Finally, we engage a research coalition and incorporate principles of user-centered design to ground the direction and design of Atlas application development.}, } @article {pmid34511519, year = {2022}, author = {Gómez, D and Romero, J and López, P and Vázquez, J and Cappo, C and Pinto, D and Villalba, C}, title = {Cloud architecture for electronic health record systems interoperability.}, journal = {Technology and health care : official journal of the European Society for Engineering and Medicine}, volume = {30}, number = {3}, pages = {551-564}, doi = {10.3233/THC-212806}, pmid = {34511519}, issn = {1878-7401}, mesh = {*Electronic Health Records ; Humans ; *Software ; }, abstract = {BACKGROUND: Current Electronic Health Record (EHR) systems are built using different data representations and information models, which makes achieving information exchange difficult.

OBJECTIVE: Our aim was to propose a scalable architecture that allows the integration of information from different EHR systems.

METHODS: A cloud-based EHR interoperable architecture is proposed through the standardization and integration of patient electronic health records. The data is stored in a cloud repository with high-availability features. Stakeholders can retrieve a patient's EHR by querying only the integrated data repository. The OpenEHR two-level approach is applied according to the HL7-FHIR standards. We validated our architecture by comparing it with five other works (CHISTAR, ARIEN, DIRAYA, LLPHR, and INEHRIS) using a set of selected axes and a scoring method.
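As a hedged illustration of what querying only the integrated data repository could look like over a standard HL7-FHIR REST interface (the base URL below is HAPI's public FHIR test server, used purely as a stand-in for the repository's endpoint; resource shapes follow the FHIR R4 specification):

```python
import requests

FHIR_BASE = "https://hapi.fhir.org/baseR4"  # stand-in for the integrated repository

# Search Patient resources by family name; the server returns a FHIR Bundle.
resp = requests.get(f"{FHIR_BASE}/Patient", params={"family": "Smith", "_count": 5})
resp.raise_for_status()
bundle = resp.json()
for entry in bundle.get("entry", []):
    patient = entry["resource"]
    print(patient["id"], patient.get("name", [{}])[0].get("family"))
```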

RESULTS: The problem was reduced to a single point of communication between each EHR system and the integrated data repository. By combining the cloud computing paradigm with selected health informatics standards, we obtained a generic and scalable architecture that complies 100% with interoperability requirements according to the evaluation framework applied.

CONCLUSIONS: The architecture allowed the integration of several EHR systems, adapting them with the use of standards and ensuring availability thanks to cloud computing features.}, } @article {pmid34510300, year = {2021}, author = {Pang, J and Bachmatiuk, A and Yang, F and Liu, H and Zhou, W and Rümmeli, MH and Cuniberti, G}, title = {Applications of Carbon Nanotubes in the Internet of Things Era.}, journal = {Nano-micro letters}, volume = {13}, number = {1}, pages = {191}, pmid = {34510300}, issn = {2150-5551}, abstract = {The post-Moore era has boosted progress in carbon nanotube-based transistors. Indeed, 5G communication and cloud computing stimulate research into applications of carbon nanotubes in electronic devices. In this perspective, we present readers with the latest trends in carbon nanotube research, including high-frequency transistors, biomedical sensors and actuators, brain-machine interfaces, and flexible logic devices and energy storage. Future opportunities are outlined, calling on scientists and engineers to engage with these emerging topics.}, } @article {pmid34505137, year = {2022}, author = {Grzesik, P and Augustyn, DR and Wyciślik, Ł and Mrozek, D}, title = {Serverless computing in omics data analysis and integration.}, journal = {Briefings in bioinformatics}, volume = {23}, number = {1}, pages = {}, pmid = {34505137}, issn = {1477-4054}, support = {02/100/RGJ21/0009//Silesian University of Technology/ ; }, mesh = {COVID-19/epidemiology/*genetics/*metabolism ; *Cloud Computing ; *Computational Biology ; *Genomics ; Humans ; *Pandemics ; *SARS-CoV-2/genetics/metabolism ; *Software ; }, abstract = {A comprehensive analysis of omics data can require vast computational resources and access to varied data sources that must be integrated into complex, multi-step analysis pipelines. Execution of many such analyses can be accelerated by applying the cloud computing paradigm, which provides scalable resources for storing data of different types and parallelizing data analysis computations. Moreover, these resources can be reused for different multi-omics analysis scenarios. Traditionally, developers are required to manage a cloud platform's underlying infrastructure, configuration, maintenance, and capacity planning. The serverless computing paradigm simplifies these operations by automatically allocating and maintaining both servers and virtual machines, as required for analysis tasks. This paradigm offers highly parallel execution and high scalability without manual management of the underlying infrastructure, freeing developers to focus on operational logic. This paper reviews serverless solutions in bioinformatics and evaluates their usage in omics data analysis and integration. We start by reviewing the application of the cloud computing model to multi-omics data analysis and exposing some shortcomings of the early approaches.
We then introduce the serverless computing paradigm and show its applicability for performing an integrative analysis of multiple omics data sources in the context of the COVID-19 pandemic.}, } @article {pmid34502840, year = {2021}, author = {Mateo-Fornés, J and Pagès-Bernaus, A and Plà-Aragonés, LM and Castells-Gasia, JP and Babot-Gaspa, D}, title = {An Internet of Things Platform Based on Microservices and Cloud Paradigms for Livestock.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {17}, pages = {}, pmid = {34502840}, issn = {1424-8220}, mesh = {Agriculture ; Animals ; Farms ; *Internet of Things ; Livestock ; Software ; Swine ; }, abstract = {With the growing adoption of Internet of Things (IoT) technology in the agricultural sector, smart devices are becoming more prevalent. The availability of new, timely, and precise data offers a great opportunity to develop advanced analytical models. Therefore, the platform used to deliver new developments to the final user is a key enabler for adopting IoT technology. This work presents a generic design of a software platform based on the cloud and implemented using microservices to facilitate the use of predictive or prescriptive analytics under different IoT scenarios. Several technologies are combined to comply with the essential features (scalability, portability, interoperability, and usability) that the platform must provide to assist decision-making in Agriculture 4.0 contexts. The platform is prepared to integrate new sensor devices, perform data operations, integrate several data sources, transfer complex statistical model developments seamlessly, and provide a user-friendly graphical interface. The proposed software architecture is implemented with open-source technologies and validated in a smart farming scenario. The growth of a batch of pigs at the fattening stage is estimated from the data provided by a level sensor installed in the silo that stores the feed from which the animals are fed. With this application, we demonstrate how farmers can monitor the weight distribution and receive alarms when high deviations happen.}, } @article {pmid34502813, year = {2021}, author = {Kalyani, Y and Collier, R}, title = {A Systematic Survey on the Role of Cloud, Fog, and Edge Computing Combination in Smart Agriculture.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {17}, pages = {}, pmid = {34502813}, issn = {1424-8220}, support = {16/SPP/3296./SFI_/Science Foundation Ireland/Ireland ; }, mesh = {*Agriculture ; *Cloud Computing ; }, abstract = {Cloud Computing is a well-established paradigm for building service-centric systems. However, ultra-low latency, high bandwidth, security, and real-time analytics are limitations in Cloud Computing when analysing and providing results for large amounts of data. Fog and Edge Computing offer solutions to the limitations of Cloud Computing. The number of agricultural domain applications that use the combination of Cloud, Fog, and Edge has been increasing in recent years. This article aims to provide a systematic literature review of the work done on Cloud, Fog, and Edge Computing applications in the smart agriculture domain from 2015 to date. The key objective of this review is to identify all relevant research on these new computing paradigms in smart agriculture and to propose a new architecture model based on combinations of Cloud-Fog-Edge.
Furthermore, it also analyses and examines the agricultural application domains, research approaches, and the application of the combinations used. Moreover, this survey discusses the components used in the architecture models and briefly explores the communication protocols used for interaction between layers. Finally, the challenges of smart agriculture and future research directions are briefly pointed out in this article.}, } @article {pmid34502795, year = {2021}, author = {Stan, RG and Băjenaru, L and Negru, C and Pop, F}, title = {Evaluation of Task Scheduling Algorithms in Heterogeneous Computing Environments.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {17}, pages = {}, pmid = {34502795}, issn = {1424-8220}, support = {PubArt//University Politehnica of Bucharest/ ; }, mesh = {*Algorithms ; Cloud Computing ; Computer Simulation ; *Ecosystem ; Workload ; }, abstract = {This work establishes a set of methodologies to evaluate the performance of any task scheduling policy in heterogeneous computing contexts. We formally state a scheduling model for hybrid edge-cloud computing ecosystems and conduct simulation-based experiments on large workloads. In addition to the conventional cloud datacenters, we consider edge datacenters comprising smartphone and Raspberry Pi edge devices, which are battery powered. We define realistic capacities of the computational resources. Once a schedule is found, the various task demands may or may not be fulfilled by the resource capacities. We build a scheduling and evaluation framework and measure typical scheduling metrics such as mean waiting time, mean turnaround time, makespan, and throughput on the Round-Robin, Shortest Job First, Min-Min, and Max-Min scheduling schemes. Our analysis and results show that state-of-the-art independent task scheduling algorithms suffer from performance degradation, in terms of significant task failures and nonoptimal resource utilization of datacenters, in heterogeneous edge-cloud environments in comparison to cloud-only environments. In particular, for large sets of tasks, due to low battery or limited memory, more than 25% of tasks fail to execute under each scheduling scheme.}, } @article {pmid34502696, year = {2021}, author = {Resende, JS and Magalhães, L and Brandão, A and Martins, R and Antunes, L}, title = {Towards a Modular On-Premise Approach for Data Sharing.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {17}, pages = {}, pmid = {34502696}, issn = {1424-8220}, support = {PD/BD/128149/2016//Faculdade de Ciências e Tecnologia, Universidade Nova de Lisboa/ ; 830929//H2020-SUICT-03-2018/ ; }, mesh = {Algorithms ; Artificial Intelligence ; *Computer Security ; *Information Dissemination ; Privacy ; }, abstract = {The growing demand for everyday data insights drives the pursuit of more sophisticated infrastructures and artificial intelligence algorithms. When combined with the growing number of interconnected devices, this raises concerns about scalability and privacy. The main problem is that devices can sense the environment and generate large volumes of possibly identifiable data. Public cloud-based technologies have been proposed as a solution, due to their high availability and low entry costs. However, there are growing concerns regarding data privacy, especially with the introduction of the new General Data Protection Regulation, due to the inherent lack of control caused by using the off-premise computational resources on which the public cloud relies.
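For the task-scheduling evaluation above, Min-Min is the least self-explanatory of the four benchmarked schemes: it repeatedly assigns the task with the smallest achievable completion time to the machine that achieves it. A compact sketch, with two machines and illustrative runtimes:

```python
# Per-machine execution times for each task (values are invented for illustration).
runtimes = {"t1": [3, 5], "t2": [2, 4], "t3": [6, 1]}
ready = [0.0, 0.0]        # earliest time each machine becomes free
schedule = {}
pending = set(runtimes)
while pending:
    # Pick the (task, machine) pair with the minimum completion time.
    task, machine, finish = min(
        ((t, m, ready[m] + runtimes[t][m]) for t in pending for m in range(2)),
        key=lambda x: x[2],
    )
    schedule[task] = machine
    ready[machine] = finish
    pending.remove(task)
print(schedule, "makespan:", max(ready))
```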
Users have no control over the data uploaded to such cloud services, which increases the uncontrolled distribution of information to third parties. This work aims to provide a modular approach that uses a cloud-of-clouds to store persistent data and reduce upfront costs while allowing information to remain private and under users' control. In addition to storage, this work also focuses on usability modules that enable data sharing. Any user can securely share and analyze/compute the uploaded data using private computing without revealing private data. This private computation can be, for example, the training of machine learning (ML) models. To achieve this, we use a combination of state-of-the-art technologies, such as MultiParty Computation (MPC) and K-anonymization, to produce a complete system with intrinsic privacy properties.}, } @article {pmid34502688, year = {2021}, author = {Mutichiro, B and Tran, MN and Kim, YH}, title = {QoS-Based Service-Time Scheduling in the IoT-Edge Cloud.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {17}, pages = {}, pmid = {34502688}, issn = {1424-8220}, support = {2020-0-00946//Institute of Information & communications Technology Planning & Evaluation (IITP) , Korea government (MSIT)/ ; }, mesh = {Algorithms ; *Internet of Things ; Software ; Workload ; }, abstract = {In edge computing, scheduling heterogeneous workloads with diverse resource requirements is challenging. Besides limited resources, the servers may be overwhelmed with computational tasks, resulting in lengthy task queues and congestion occasioned by unusual network traffic patterns. Additionally, Internet of Things (IoT)/Edge applications have different characteristics and performance requirements, which determine whether edge applications can satisfy both deadlines and each user's QoS requirements. This study aims to address these restrictions by proposing a mechanism that improves cluster resource utilization and Quality of Service (QoS), in terms of service time, in an edge cloud cluster. Containerization can provide a way to improve the performance of the IoT-Edge cloud by factoring in task dependencies and heterogeneous application resource demands. In this paper, we propose STaSA, a service time aware scheduler for the edge environment. The algorithm automatically assigns requests to different processing nodes and then schedules their execution under real-time constraints, thus minimizing the number of QoS violations. The effectiveness of our scheduling model is demonstrated through an implementation on KubeEdge, a container orchestration platform based on Kubernetes.
Experimental results show significantly fewer violations in QoS during scheduling and improved performance compared to the state of the art.}, } @article {pmid34498660, year = {2021}, author = {Camargo, MD and Silveira, DT and Lazzari, DD and Rodrigues, AFV and Moraes, KB and Duarte, ERM}, title = {Nursing Activities Score: trajectory of the instrument from paper to cloud in a university hospital.}, journal = {Revista da Escola de Enfermagem da U S P}, volume = {55}, number = {}, pages = {e20200233}, doi = {10.1590/1980-220X-REEUSP-2020-0233}, pmid = {34498660}, issn = {1980-220X}, mesh = {*Computer Security ; Health Insurance Portability and Accountability Act ; Hospitals, University ; Humans ; *Nursing Care ; Software ; United States ; }, abstract = {OBJECTIVE: To report the process of organization and construction of an information technology structure named Nursing Activities Score (NAS) Cloud Technology®.

METHOD: This project was based on life cycle theory and enabled the development of a technological product through software engineering.

RESULTS: The NAS Cloud Technology® was developed for remote and collaborative access on a website hosted by Google Sites® and protected in a business environment by certified security and data protection safeguards compliant with the Health Insurance Portability and Accountability Act (HIPAA). In 2015, this system received more than 10,000 submissions/month, totaling 12 care units for critical patients covered by the information technology structure, circa 200 nurses per day involved in the collection, and hundreds of daily submissions, integrating the complete transition from paper to cloud.

CONCLUSION: The development of the NAS Cloud Technology® system has enabled the use of technology as a facilitating means for the use of nursing care data, providing tools for decision-making on the nursing personnel sizing required for the care demands of inpatient care units. The potential of cloud structures stands out due to the possibilities they offer for innovation, as well as low-cost access and high replicability of the information system.}, } @article {pmid34497501, year = {2021}, author = {Luo, X and Feng, L and Xun, H and Zhang, Y and Li, Y and Yin, L}, title = {Rinegan: A Scalable Image Processing Architecture for Large Scale Surveillance Applications.}, journal = {Frontiers in neurorobotics}, volume = {15}, number = {}, pages = {648101}, pmid = {34497501}, issn = {1662-5218}, abstract = {Image processing is widely used in intelligent robots, significantly improving the surveillance capabilities of smart buildings, industrial parks, and border ports. However, relying on the camera installed in a single robot is not enough, since it only provides a narrow field of view as well as limited processing performance. In particular, a target person such as a suspect may appear anywhere, and tracking the suspect in such a large-scale scene requires cooperation between fixed cameras and patrol robots. This induces a significant surge in demand for data, computing resources, and networking infrastructure. In this work, we develop a scalable architecture to optimize image-processing efficacy and response rate. In this architecture, the lightweight pre-processing and object detection functions are deployed on the gateway side to minimize bandwidth consumption. Cloud-side servers receive only the recognized data, rather than entire image or video streams, to identify the specific suspect. The cloud side then sends this information to the robot, and the robot completes the corresponding tracking task. All these functions are implemented and orchestrated based on a micro-service architecture to improve flexibility. We implement a prototype system, called Rinegan, and evaluate it in an in-lab testing environment. The result shows that Rinegan is able to improve the effectiveness and efficacy of image processing.}, } @article {pmid34489813, year = {2021}, author = {Shan, B and Pu, Y and Chen, B and Lu, S}, title = {New Technologies' Commercialization: The Roles of the Leader's Emotion and Incubation Support.}, journal = {Frontiers in psychology}, volume = {12}, number = {}, pages = {710122}, pmid = {34489813}, issn = {1664-1078}, abstract = {New technologies, such as brain-computer interface technology, advanced artificial intelligence, cloud computing, and virtual reality technology, have a strong influence on our daily activities. The application and commercialization of these technologies are prevailing globally, in areas such as distance education, health monitoring, smart home devices, and robots. However, we still know little about the roles of individual emotion and the external environment in the commercialization of these new technologies. Therefore, we focus on an emotional factor of the leader, their passion for work, and discuss its effect on technology commercialization. We also analyze the moderating role of incubation support in the relationship between the leader's emotion and technology commercialization.
The results contribute to the application of emotion in improving the commercialization of new technologies.}, } @article {pmid34479966, year = {2021}, author = {Govind, D and Becker, JU and Miecznikowski, J and Rosenberg, AZ and Dang, J and Tharaux, PL and Yacoub, R and Thaiss, F and Hoyer, PF and Manthey, D and Lutnick, B and Worral, AM and Mohammad, I and Walavalkar, V and Tomaszewski, JE and Jen, KY and Sarder, P}, title = {PodoSighter: A Cloud-Based Tool for Label-Free Podocyte Detection in Kidney Whole-Slide Images.}, journal = {Journal of the American Society of Nephrology : JASN}, volume = {32}, number = {11}, pages = {2795-2813}, pmid = {34479966}, issn = {1533-3450}, support = {R01 DK114485/DK/NIDDK NIH HHS/United States ; U2C DK114886/DK/NIDDK NIH HHS/United States ; UL1 TR001412/TR/NCATS NIH HHS/United States ; U01 DK103225/DK/NIDDK NIH HHS/United States ; }, mesh = {Animals ; Automation ; Cell Count ; Cell Nucleus/ultrastructure ; *Cloud Computing ; Datasets as Topic ; Deep Learning ; Diabetic Nephropathies/chemically induced/pathology ; Disease Models, Animal ; Humans ; Image Processing, Computer-Assisted/*methods ; Kidney Diseases/*pathology ; Kidney Glomerulus/*cytology ; Mice ; Mice, Inbred C57BL ; Microscopy ; Periodic Acid-Schiff Reaction ; Podocytes/*ultrastructure ; Rats ; Species Specificity ; }, abstract = {BACKGROUND: Podocyte depletion precedes progressive glomerular damage in several kidney diseases. However, the current standard of visual detection and quantification of podocyte nuclei from brightfield microscopy images is laborious and imprecise.

METHODS: We have developed PodoSighter, an online cloud-based tool, to automatically identify and quantify podocyte nuclei from giga-pixel brightfield whole-slide images (WSIs) using deep learning. Ground truth to train the tool used immunohistochemistry- or immunofluorescence-labeled images from a multi-institutional cohort of 122 histologic sections from mouse, rat, and human kidneys. To demonstrate the generalizability of our tool in investigating podocyte loss in clinically relevant samples, we tested it in rodent models of glomerular diseases, including diabetic kidney disease, crescentic GN, and dose-dependent direct podocyte toxicity and depletion, and in human biopsies from steroid-resistant nephrotic syndrome and from human autopsy tissues.

RESULTS: The optimal model yielded high sensitivity/specificity of 0.80/0.80, 0.81/0.86, and 0.80/0.91 in mouse, rat, and human images, respectively, from periodic acid-Schiff-stained WSIs. Furthermore, the podocyte nuclear morphometrics extracted using PodoSighter were informative in identifying diseased glomeruli. We have made PodoSighter freely available to the general public as turnkey plugins in a cloud-based web application for end users.
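For readers comparing the paired figures above, sensitivity and specificity reduce to simple confusion-matrix ratios; the counts below are invented to reproduce the human-kidney values, not taken from the study:

```python
def sensitivity_specificity(tp, fn, tn, fp):
    """Sensitivity = TP / (TP + FN); specificity = TN / (TN + FP)."""
    return tp / (tp + fn), tn / (tn + fp)

# E.g., 80 true positives, 20 false negatives, 91 true negatives, and
# 9 false positives reproduce the reported 0.80 sensitivity / 0.91 specificity.
print(sensitivity_specificity(80, 20, 91, 9))  # (0.8, 0.91)
```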

CONCLUSIONS: Our study demonstrates an automated computational approach to detect and quantify podocyte nuclei in standard histologically stained WSIs, facilitating podocyte research and enabling possible future clinical applications.}, } @article {pmid34461487, year = {2021}, author = {Wang, C and Qin, J and Qu, C and Ran, X and Liu, C and Chen, B}, title = {A smart municipal waste management system based on deep-learning and Internet of Things.}, journal = {Waste management (New York, N.Y.)}, volume = {135}, number = {}, pages = {20-29}, doi = {10.1016/j.wasman.2021.08.028}, pmid = {34461487}, issn = {1879-2456}, mesh = {*Deep Learning ; *Garbage ; *Internet of Things ; *Refuse Disposal ; *Waste Management ; }, abstract = {A proof-of-concept municipal waste management system was proposed to reduce the cost of waste classification, monitoring, and collection. In this system, we utilize a deep learning-based classifier and cloud computing techniques to realize high-accuracy waste classification at the beginning of garbage collection. To facilitate subsequent waste disposal, we subdivide recyclable waste into plastic, glass, paper or cardboard, metal, fabric, and other recyclable waste, a total of six categories. Deep-learning convolutional neural networks (CNNs) were applied to the garbage classification task. Here, we investigate seven state-of-the-art CNNs and data pre-processing methods for waste classification, whose accuracies across nine categories range from 91.9 to 94.6% on the validation set. Among these networks, MobileNetV3 has a high classification accuracy (94.26%), a small storage size (49.5 MB), and the shortest running time (261.7 ms). Moreover, Internet of Things (IoT) devices, which implement information exchange between waste containers and the waste management center, are designed to monitor the overall amount of waste produced in the area and the operating state of any waste container via a set of sensors. According to the monitoring information, the waste management center can schedule adaptive equipment deployment and maintenance, waste collection, and vehicle routing plans, which serves as an essential part of a successful municipal waste management system.}, } @article {pmid34458659, year = {2021}, author = {Bellal, Z and Nour, B and Mastorakis, S}, title = {CoxNet: A Computation Reuse Architecture at the Edge.}, journal = {IEEE transactions on green communications and networking}, volume = {5}, number = {2}, pages = {765-777}, pmid = {34458659}, issn = {2473-2400}, abstract = {In recent years, edge computing has emerged as an effective solution to extend cloud computing and satisfy the demand of applications for low latency. However, with today's explosion of innovative applications (e.g., augmented reality, natural language processing, virtual reality), processing services for mobile and smart devices have become computation-intensive, consisting of multiple interconnected computations. This, coupled with the need for delay sensitivity and high quality of service, puts massive pressure on edge servers. Meanwhile, tasks invoking these services may involve similar inputs that lead to the same output. In this paper, we present CoxNet, an efficient computation reuse architecture for edge computing. CoxNet enables edge servers to reuse previous computations while scheduling dependent incoming computations. We provide an analytical model for computation reuse combined with dependent task offloading and design a novel computation offloading scheduling scheme.
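The computation-reuse idea at the heart of CoxNet, serving a stored result when a task arrives with an equivalent input, can be sketched as input-hash memoization; this is a generic illustration, not CoxNet's actual architecture, and the service function is a placeholder:

```python
import hashlib
import json

_cache = {}

def execute_with_reuse(service, payload):
    """Reuse a previous result when the same service sees an equivalent input."""
    key = hashlib.sha256(
        json.dumps([service.__name__, payload], sort_keys=True).encode()
    ).hexdigest()
    if key in _cache:
        return _cache[key]       # cache hit: skip recomputation entirely
    result = service(payload)    # cache miss: run the computation once
    _cache[key] = result
    return result

def detect_objects(frame_features):  # stand-in for a heavy edge service
    return sorted(frame_features)

print(execute_with_reuse(detect_objects, [3, 1, 2]))  # computed
print(execute_with_reuse(detect_objects, [3, 1, 2]))  # reused
```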
We also evaluate the efficiency and effectiveness of CoxNet via synthetic and real-world datasets. Our results show that CoxNet is able to reduce task execution time by up to 66% on a synthetic dataset and up to 50% on a real-world dataset.}, } @article {pmid34458569, year = {2021}, author = {Peechara, RR and V, S}, title = {A chaos theory inspired, asynchronous two-way encryption mechanism for cloud computing.}, journal = {PeerJ. Computer science}, volume = {7}, number = {}, pages = {e628}, pmid = {34458569}, issn = {2376-5992}, abstract = {Data exchange over the Internet and other access channels is on the rise, which leads to security concerns. Many experiments have been conducted to investigate time-efficient, highly randomized encryption methods for such data. The latest studies, however, are still debated for several reasons: their outcomes do not yield completely random keys over longer key streams, and prominent repetition makes the processes predictable and susceptible to attack. Furthermore, newly generated keys need efficient algorithms to handle high volumes of transactional data successfully. In this article, solutions to these two critical issues are presented. First, using a chaotic series of events for generating keys is sufficient to obtain a high degree of randomness. Moreover, this work also proposes a novel, non-traditional validation test based on correlation to determine the true randomness of the generated keys. A near-100% probability of key validity over almost infinitely long time intervals keeps the algorithms' complexity low when securing higher volumes of data. These algorithms are mainly intended for cloud-based transactions, where data volume is potentially high and extremely variable; the suggested algorithms improve data transmission time by 3% to 4%. This research has the potential to improve communication systems over the next decade by removing decades-long bottlenecks.}, } @article {pmid34456613, year = {2021}, author = {Duan, L and Da Xu, L}, title = {Data Analytics in Industry 4.0: A Survey.}, journal = {Information systems frontiers : a journal of research and innovation}, volume = {}, number = {}, pages = {1-17}, pmid = {34456613}, issn = {1387-3326}, abstract = {Industry 4.0 is the fourth industrial revolution, enabling decentralized production through shared facilities to achieve on-demand manufacturing and resource efficiency. It evolves from Industry 3.0, which focuses on routine operation. Data analytics is the set of techniques focused on gaining actionable insights for making smart decisions from massive amounts of data. As the performance of routine operations can be improved by smart decisions, and smart decisions need support from routine operations to collect relevant data, there is an increasing amount of research effort on the merger of Industry 4.0 and data analytics. To better understand current research efforts, hot topics, and trending topics at this critical intersection, the basic concepts of Industry 4.0 and data analytics are introduced first. Then the merger between them is decomposed into three components: industry sectors, cyber-physical systems, and analytic methods. Joint research efforts on the intersections of different components are studied and discussed.
Finally, a systematic literature review on the interaction between Industry 4.0 and data analytics is conducted to understand existing research foci and trends.}, } @article {pmid34450978, year = {2021}, author = {Rodero, C and Olmedo, E and Bardaji, R and Piera, J}, title = {New Radiometric Approaches to Compute Underwater Irradiances: Potential Applications for High-Resolution and Citizen Science-Based Water Quality Monitoring Programs.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {16}, pages = {}, pmid = {34450978}, issn = {1424-8220}, support = {776480//Horizon 2020 Framework Programme/ ; }, mesh = {*Citizen Science ; Environmental Monitoring ; Water ; *Water Quality ; }, abstract = {Measuring the diffuse attenuation coefficient (Kd) allows for monitoring the water body's environmental status. This parameter is of particular interest in water quality monitoring programs because it quantifies the presence of light and the depth of the euphotic zone. Citizen scientists can meaningfully contribute by monitoring water quality, complementing traditional methods by reducing monitoring costs and significantly improving data coverage, empowering and supporting decision-making. However, the acquisition of in situ underwater irradiance measurements has some quality limitations, especially in areas where stratification phenomena occur in the first meters of depth. This vertical layering introduces a gradient of properties in the vertical direction, affecting the associated Kd. To detect and characterize these variations of Kd in the water column, a system of optical sensors is needed, ideally spaced a few centimeters apart, to improve the otherwise low vertical accuracy. With such close spacing, however, the problem of self-shading on the instrumentation becomes critical. Here, we introduce a new concept that aims to improve the vertical accuracy of irradiance measurements: the underwater annular irradiance (Ea). This new concept consists of measuring the irradiance in an annular-shaped distribution. We first compute the optimal annular angle that avoids self-shading and maximizes the light captured by the sensors. Second, we use different scenarios of water types, solar zenith angles, and cloud coverage to assess the robustness of the corresponding diffuse attenuation coefficient, Ka. Finally, we derive empirical functions for computing Kd from Ka. This new concept opens the possibility of a new generation of optical sensors in an annular-shaped distribution, which is expected to (a) increase the vertical resolution of irradiance measurements and (b) be easy to deploy and maintain, and thus more suitable for citizen scientists.}, } @article {pmid34450973, year = {2021}, author = {Lopez-Arevalo, I and Gonzalez-Compean, JL and Hinojosa-Tijerina, M and Martinez-Rendon, C and Montella, R and Martinez-Rodriguez, JL}, title = {A WoT-Based Method for Creating Digital Sentinel Twins of IoT Devices.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {16}, pages = {}, pmid = {34450973}, issn = {1424-8220}, support = {41756//FORDECYT-CONACYT/ ; }, abstract = {The data produced by sensors of IoT devices are becoming keystones for organizations to conduct critical decision-making processes. However, delivering information to these processes in real time represents two challenges for organizations: the first is achieving a constant dataflow from IoT to the cloud, and the second is enabling decision-making processes to retrieve data from dataflows in real time.
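For the irradiance entry above, the relation between measured irradiances and Kd follows from the standard exponential attenuation model E(z) = E(0)exp(-Kd z), so Kd between two depths is recovered as a log-ratio; a minimal worked example with invented readings:

```python
import math

def diffuse_attenuation(e1, z1, e2, z2):
    """Kd between depths z1 < z2, assuming E(z) = E(0) * exp(-Kd * z)."""
    return math.log(e1 / e2) / (z2 - z1)

# Irradiance dropping from 100 to 60 units between 1 m and 3 m depth
# gives Kd = ln(100/60) / 2 m, approximately 0.255 per metre.
print(diffuse_attenuation(100.0, 1.0, 60.0, 3.0))
```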
This paper presents a cloud-based Web of Things method for creating digital twins of IoT devices (named sentinels). The novelty of the proposed approach is that sentinels create an abstract window for decision-making processes to: (a) find data (e.g., properties, events, and data from sensors of IoT devices) or (b) invoke functions (e.g., actions and tasks) from physical devices (PD), as well as from virtual devices (VD). In this approach, the applications and services of decision-making processes deal with sentinels instead of managing the complex details associated with the PDs, VDs, and cloud computing infrastructures. A prototype based on the proposed method was implemented to conduct a case study based on a blockchain system for verifying contract violations in sensors used in product transportation logistics. The evaluation showed the effectiveness of sentinels in enabling organizations to obtain data from IoT sensors and the dataflows used by decision-making processes to convert these data into useful information.}, } @article {pmid34450960, year = {2021}, author = {Schackart, KE and Yoon, JY}, title = {Machine Learning Enhances the Performance of Bioreceptor-Free Biosensors.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {16}, pages = {}, pmid = {34450960}, issn = {1424-8220}, support = {P30 ES006694/ES/NIEHS NIH HHS/United States ; T32GM132008/NH/NIH HHS/United States ; }, mesh = {*Biosensing Techniques ; Machine Learning ; Neural Networks, Computer ; Spectrum Analysis, Raman ; Support Vector Machine ; }, abstract = {Since their inception, biosensors have frequently employed simple regression models to calculate analyte composition based on the biosensor's signal magnitude. Traditionally, bioreceptors provide excellent sensitivity and specificity to the biosensor. Increasingly, however, bioreceptor-free biosensors have been developed for a wide range of applications. Without a bioreceptor, maintaining strong specificity and a low limit of detection has become the major challenge. Machine learning (ML) has been introduced to improve the performance of these biosensors, effectively replacing the bioreceptor with modeling to gain specificity. Here, we present how ML has been used to enhance the performance of these bioreceptor-free biosensors. In particular, we discuss how ML has been used in imaging, e-nose and e-tongue, and surface-enhanced Raman spectroscopy (SERS) biosensors. Notably, principal component analysis (PCA) combined with support vector machines (SVMs) and various artificial neural network (ANN) algorithms have shown outstanding performance in a variety of tasks. We anticipate that ML will continue to improve the performance of bioreceptor-free biosensors, especially with the prospects of sharing trained models and cloud computing for mobile computation. To facilitate this, the biosensing community would benefit from increased contributions to open-access data repositories for biosensor data.}, } @article {pmid34450933, year = {2021}, author = {Gupta, D and Rani, S and Ahmed, SH and Verma, S and Ijaz, MF and Shafi, J}, title = {Edge Caching Based on Collaborative Filtering for Heterogeneous ICN-IoT Applications.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {16}, pages = {}, pmid = {34450933}, issn = {1424-8220}, abstract = {The substantial advancements offered by edge computing have indicated serious evolutionary improvements for Internet of Things (IoT) technology.
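The PCA-with-SVM pairing highlighted in the biosensor review above maps directly onto a scikit-learn pipeline; in this sketch the digits dataset merely stands in for sensor spectra (e.g., SERS intensities per wavenumber), and the component count and kernel are illustrative:

```python
from sklearn.datasets import load_digits
from sklearn.decomposition import PCA
from sklearn.model_selection import train_test_split
from sklearn.pipeline import make_pipeline
from sklearn.svm import SVC

# Stand-in data; a real biosensor would supply spectra or image features.
X, y = load_digits(return_X_y=True)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)

# PCA compresses the raw signal; the SVM supplies the specificity.
model = make_pipeline(PCA(n_components=20), SVC(kernel="rbf"))
model.fit(X_train, y_train)
print(f"test accuracy: {model.score(X_test, y_test):.3f}")
```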
The rigid design philosophy of the traditional network architecture limits its scope to meet future demands. However, information-centric networking (ICN) is envisioned as a promising architecture to bridge the huge gaps and maintain IoT networks, mostly referred to as ICN-IoT. The edge-enabled ICN-IoT architecture always demands efficient in-network caching techniques for supporting better user quality of experience (QoE). In this paper, we propose an enhanced ICN-IoT content caching strategy by enabling artificial intelligence (AI)-based collaborative filtering within the edge cloud to support heterogeneous IoT architecture. This collaborative filtering-based content caching strategy intelligently caches content on edge nodes for traffic management at cloud databases. Evaluations have been conducted to compare the performance of the proposed strategy against various benchmark strategies, such as LCE, LCD, CL4M, and ProbCache. The analytical results demonstrate the better performance of our proposed strategy, with an average gain of 15% in cache hit ratio, a 12% reduction in content retrieval delay, and a 28% reduction in average hop count in comparison to the best-performing baseline, LCD. We believe that the proposed strategy will contribute an effective solution to related studies in this domain.}, } @article {pmid34450808, year = {2021}, author = {Wang, Q and Mu, H}, title = {Privacy-Preserving and Lightweight Selective Aggregation with Fault-Tolerance for Edge Computing-Enhanced IoT.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {16}, pages = {}, pmid = {34450808}, issn = {1424-8220}, mesh = {Algorithms ; Computer Security ; Confidentiality ; *Internet of Things ; *Privacy ; }, abstract = {Edge computing has been introduced to the Internet of Things (IoT) to meet the requirements of IoT applications. At the same time, data aggregation is widely used in data processing to reduce communication overhead and energy consumption in IoT. Most existing schemes aggregate the overall data without filtering. In addition, aggregation schemes also face huge challenges, such as preserving the privacy of individual IoT devices' data or meeting the fault-tolerance and lightweight requirements of the schemes. In this paper, we present a privacy-preserving and lightweight selective aggregation scheme with fault tolerance (PLSA-FT) for edge computing-enhanced IoT. In PLSA-FT, selective aggregation can be achieved by constructing Boolean responses and numerical responses according to specific query conditions of the cloud center. Furthermore, we modified the basic Paillier homomorphic encryption to guarantee data privacy and to tolerate IoT device malfunctions. An online/offline signature mechanism is utilized to reduce computation costs. The system characteristic analyses prove that the PLSA-FT scheme achieves confidentiality, privacy preservation, source authentication, integrity verification, fault tolerance, and dynamic membership management.
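PLSA-FT builds on the additive homomorphism of Paillier encryption: multiplying ciphertexts adds the underlying plaintexts, which is what lets an aggregator sum readings it cannot read. A textbook-parameter sketch, with keys far too small for real use and without the paper's modifications:

```python
import random
from math import gcd

# Toy Paillier keypair (p, q are small primes chosen purely for illustration).
p, q = 293, 433
n, n2 = p * q, (p * q) ** 2
lam = (p - 1) * (q - 1) // gcd(p - 1, q - 1)   # lcm(p-1, q-1)
g = n + 1

def L(x):
    return (x - 1) // n

mu = pow(L(pow(g, lam, n2)), -1, n)  # modular inverse (Python 3.8+)

def encrypt(m):
    r = random.randrange(1, n)
    while gcd(r, n) != 1:
        r = random.randrange(1, n)
    return (pow(g, m, n2) * pow(r, n, n2)) % n2

def decrypt(c):
    return (L(pow(c, lam, n2)) * mu) % n

# Additive homomorphism: the product of ciphertexts decrypts to the sum.
c = (encrypt(12) * encrypt(30)) % n2
print(decrypt(c))  # 42
```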
Moreover, performance evaluation results show that PLSA-FT is lightweight, with low computation costs and communication overheads.}, } @article {pmid34450797, year = {2021}, author = {Liu, Y and Ni, Z and Karlsson, M and Gong, S}, title = {Methodology for Digital Transformation with Internet of Things and Cloud Computing: A Practical Guideline for Innovation in Small- and Medium-Sized Enterprises.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {16}, pages = {}, pmid = {34450797}, issn = {1424-8220}, mesh = {Cloud Computing ; Industry ; *Internet of Things ; Technology ; }, abstract = {Research on the Internet of Things (IoT) and cloud computing has been pervasive in both the academic and industrial worlds. IoT and cloud computing are seen as cornerstones of digital transformation in industry. However, restricted by limited resources and a lack of expertise in information and communication technologies, small- and medium-sized enterprises (SMEs) have difficulty achieving digitalization of their business. In this paper, we propose a reference framework for SMEs to follow as a guideline in the journey of digital transformation. The framework features a three-stage procedure that covers business, technology, and innovation, which can be iterated to drive product and business development. A case study of digital transformation taking place in the vertical plant wall industry is detailed. Furthermore, some solution design principles concluded from real industrial practice are presented. This paper reviews digital transformation practice in the vertical plant wall industry and aims to accelerate the pace of SMEs in the journey of digital transformation.}, } @article {pmid34450717, year = {2021}, author = {Pérez-Pons, ME and Alonso, RS and García, O and Marreiros, G and Corchado, JM}, title = {Deep Q-Learning and Preference Based Multi-Agent System for Sustainable Agricultural Market.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {16}, pages = {}, pmid = {34450717}, issn = {1424-8220}, support = {RTC-2017-6536-7//European Regional Development Fund/ ; 0677\_DISRUPTIVE\_2\_E//European Regional Development Fund/ ; }, mesh = {*Agriculture ; *Climate Change ; }, abstract = {Yearly population growth will lead to a significant increase in agricultural production in the coming years. Twenty-first-century agricultural producers will face the challenge of achieving food security and efficiency. This must be achieved while ensuring sustainable agricultural systems and overcoming the problems posed by climate change, the depletion of water resources, and the potential for increased erosion and loss of productivity due to extreme weather conditions. These environmental consequences will directly affect the price-setting process. In view of price oscillations and the lack of transparent information for buyers, a multi-agent system (MAS) is presented in this article. It supports decision-making in the purchase of sustainable agricultural products. The proposed MAS consists of a system that supports decision-making when choosing a supplier on the basis of certain preference-based parameters aimed at measuring the sustainability of a supplier, and a deep Q-learning agent for forecasting future agricultural market prices. Therefore, different agri-environmental indicators (AEIs) have been considered, as well as the use of edge computing technologies to reduce the costs of data transfer to the cloud.
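The deep Q-learning agent in the entry above approximates, with a neural network, the tabular update sketched here; the toy environment, rewards, and hyperparameters are placeholders, not the market model:

```python
import random

# Q(s,a) <- Q(s,a) + alpha * (r + gamma * max_a' Q(s',a') - Q(s,a))
alpha, gamma, epsilon = 0.1, 0.9, 0.2
Q = {(s, a): 0.0 for s in range(5) for a in range(2)}

def step(state, action):
    """Toy environment: action 1 from state 4 pays off; otherwise no reward."""
    reward = 1.0 if (state == 4 and action == 1) else 0.0
    return random.randrange(5), reward

state = 0
for _ in range(10_000):
    # Epsilon-greedy action selection.
    if random.random() < epsilon:
        action = random.randrange(2)
    else:
        action = max((0, 1), key=lambda a: Q[(state, a)])
    next_state, reward = step(state, action)
    best_next = max(Q[(next_state, a)] for a in (0, 1))
    Q[(state, action)] += alpha * (reward + gamma * best_next - Q[(state, action)])
    state = next_state
```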
The presented MAS combines price-setting optimization and user preferences with regard to accessing, filtering, and integrating information. The agents filter and fuse information relevant to a user according to supplier attributes and a dynamic environment. The results presented in this paper allow a user to choose the supplier that best suits their preferences as well as to gain insight into agricultural futures market price oscillations through a deep Q-learning agent.}, } @article {pmid34450715, year = {2021}, author = {Ni, Z and Liu, Y and Karlsson, M and Gong, S}, title = {A Sensing System Based on Public Cloud to Monitor Indoor Environment of Historic Buildings.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {16}, pages = {}, pmid = {34450715}, issn = {1424-8220}, support = {DNR:2019-023737//Energimyndigheten/ ; }, mesh = {*Artificial Intelligence ; *Cloud Computing ; Electrocardiography ; Monitoring, Physiologic ; }, abstract = {Monitoring the indoor environment of historic buildings helps to identify potential risks, provide guidelines for improving regular maintenance, and preserve cultural artifacts. However, most of the existing monitoring systems proposed for historic buildings are not intended for general digitization purposes that provide data for smart services employing, e.g., artificial intelligence with machine learning. In addition, considering that preserving historic buildings is a long-term process that demands preventive maintenance, a monitoring system requires stable and scalable storage and computing resources. In this paper, a digitalization framework is proposed for the smart preservation of historic buildings. A sensing system following the architecture of this framework is implemented by integrating various advanced digitalization techniques, such as the Internet of Things, edge computing, and cloud computing. The sensing system realizes remote data collection, enables viewing of real-time and historical data, and provides the capability for performing real-time analysis to achieve preventive maintenance of historic buildings in future research. Field testing results show that the implemented sensing system has a 2% end-to-end loss rate for collecting data samples, and the loss rate can be decreased to 0.3%. The low loss rate indicates that the proposed sensing system has high stability and meets the requirements for long-term monitoring of historic buildings.}, } @article {pmid34445517, year = {2021}, author = {Bussola, N and Papa, B and Melaiu, O and Castellano, A and Fruci, D and Jurman, G}, title = {Quantification of the Immune Content in Neuroblastoma: Deep Learning and Topological Data Analysis in Digital Pathology.}, journal = {International journal of molecular sciences}, volume = {22}, number = {16}, pages = {}, pmid = {34445517}, issn = {1422-0067}, mesh = {Cloud Computing ; Deep Learning ; Female ; Humans ; Image Interpretation, Computer-Assisted/*methods ; Lymphocytes/metabolism ; Male ; Neural Networks, Computer ; Neuroblastoma/diagnostic imaging/*immunology ; }, abstract = {We introduce here a novel machine learning (ML) framework to address the issue of the quantitative assessment of the immune content in neuroblastoma (NB) specimens. First, the EUNet, a U-Net with an EfficientNet encoder, is trained to detect lymphocytes on tissue digital slides stained with the CD3 T-cell marker. The training set consists of 3782 images extracted from an original collection of 54 whole slide images (WSIs), manually annotated for a total of 73,751 lymphocytes.
Resampling strategies, data augmentation, and transfer learning approaches are adopted to ensure reproducibility and to reduce the risk of overfitting and selection bias. Topological data analysis (TDA) is then used to define activation maps from different layers of the neural network at different stages of the training process, described by persistence diagrams (PD) and Betti curves. TDA is further integrated with the uniform manifold approximation and projection (UMAP) dimensionality reduction and the hierarchical density-based spatial clustering of applications with noise (HDBSCAN) algorithm to cluster, based on the deep features, the relevant subgroups and structures across different levels of the neural network. Finally, the recent TwoNN approach is leveraged to study the variation of the intrinsic dimensionality of the U-Net model. As the main task, the proposed pipeline is employed to evaluate the density of lymphocytes over the whole tissue area of the WSIs. The model achieves good results, with a mean absolute error of 3.1 on the test set, showing significant agreement between densities estimated by our EUNet model and by trained pathologists, thus indicating the potential of a promising new strategy for the quantification of the immune content in NB specimens. Moreover, the UMAP algorithm unveiled interesting patterns compatible with pathological characteristics, also highlighting novel insights into the dynamics of the intrinsic dataset dimensionality at different stages of the training process. All the experiments were run on the Microsoft Azure cloud platform.}, } @article {pmid34444132, year = {2021}, author = {Cai, X and Xu, D}, title = {Application of Edge Computing Technology in Hydrological Spatial Analysis and Ecological Planning.}, journal = {International journal of environmental research and public health}, volume = {18}, number = {16}, pages = {}, pmid = {34444132}, issn = {1660-4601}, mesh = {China ; *Ecosystem ; *Hydrology ; Spatial Analysis ; Technology ; Urbanization ; }, abstract = {Rapid urbanization causes many water security issues, such as urban waterlogging, environmental water pollution, and water shortages. It is therefore necessary to integrate a variety of theories, methods, measures, and means to conduct ecological problem diagnosis, ecological function demand assessment, and ecological security pattern planning. Here, EC (Edge Computing) technology is applied to analyze the hydrological spatial structure characteristics and ecological planning method of waterfront green space. First, various information is collected and scientifically analyzed around the core element of ecological planning: water. Then, in-depth research is conducted on previous hydrological spatial analysis methods to identify their defects. To address these defects, EC technology is introduced to design a bottom-up overall architecture for an intelligent ecological planning gateway, comprising field devices, the EC intelligent planning gateway, a transmission system, and a cloud processing platform. Finally, the performance of the overall architecture of the intelligent ecological planning gateway is tested. The study aims to optimize the performance of the hydrological spatial analysis method and ecological planning method in Xianglan town of Jiamusi city. The results show that the system supports flood-control safety planning and the analysis of water-source pollution.
Additionally, the system can use EC technology to predict, according to pollutant types and hydrological characteristics, the composition and dosage of the treatment agents that need to be added for sludge and pollutant treatment, thereby helping to protect the public health of residents near the water source. Compared with previous hydrological spatial analysis and ecological planning methods, the system is more scientific, efficient, and expandable. The results provide a technical basis for research in related fields.}, } @article {pmid34442097, year = {2021}, author = {Spangler, HD and Simancas-Pallares, MA and Ginnis, J and Ferreira Zandoná, AG and Roach, J and Divaris, K}, title = {A Web-Based Rendering Application for Communicating Dental Conditions.}, journal = {Healthcare (Basel, Switzerland)}, volume = {9}, number = {8}, pages = {}, pmid = {34442097}, issn = {2227-9032}, support = {U01DE025046/DE/NIDCR NIH HHS/United States ; Grover C. Hunter Research Fund//Dental Foundation of North Carolina/ ; Viviana R. Duce Fellowship in Pediatric Dentistry//Dental Foundation of North Carolina/ ; }, abstract = {The importance of visual aids in communicating clinical examination findings or proposed treatments in dentistry cannot be overstated. Similarly, communicating dental research results with tooth surface-level precision is impractical without visual representations. Here, we present the development, deployment, and two real-life applications of a web-based data visualization informatics pipeline that converts tooth surface-level information to colorized, three-dimensional renderings. The core of the informatics pipeline focuses on texture (UV) mapping of a pre-existing model of the human primary dentition. The 88 individually segmented tooth surfaces receive independent inputs that are represented in colors and textures according to customizable user specifications. The web implementation SculptorHD, deployed on the Google Cloud Platform, can accommodate manually entered or spreadsheet-formatted tooth surface data and allows the customization of color palettes and thresholds, as well as surface textures (e.g., condition-free, caries lesions, stainless steel, or ceramic crowns). Its current implementation enabled the visualization and interpretation of clinical early childhood caries (ECC) subtypes using latent class analysis-derived caries experience summary data. As a demonstration of its potential clinical utility, the tool was also used to simulate the restorative treatment presentation of a severe ECC case, including the use of stainless steel and ceramic crowns. We expect that this publicly available web-based tool can help clinicians and investigators deliver precise, visual presentations of dental conditions and proposed treatments.
The creation of rapidly adjustable lifelike dental models, integrated with existing electronic health records and responsive to new clinical findings, as planned for future work, is likely to boost two-way communication between clinicians and their patients.}, } @article {pmid34435200, year = {2022}, author = {Lacey, JV and Benbow, JL}, title = {Invited Commentary: Standards, Inputs, and Outputs-Strategies for Improving Data-Sharing and Consortia-Based Epidemiologic Research.}, journal = {American journal of epidemiology}, volume = {191}, number = {1}, pages = {159-162}, doi = {10.1093/aje/kwab217}, pmid = {34435200}, issn = {1476-6256}, mesh = {Epidemiologic Studies ; Humans ; *Information Dissemination ; *Metabolomics ; Reference Standards ; }, abstract = {Data-sharing improves epidemiologic research, but the sharing of data frustrates epidemiologic researchers. The inefficiencies of current methods and options for data-sharing are increasingly documented and easily understood by any study group that has shared its data and any researcher who has received shared data. In this issue of the Journal, Temprosa et al. (Am J Epidemiol. 2021;191(1):147-158) describe how the Consortium of Metabolomics Studies (COMETS) developed and deployed a flexible analytical platform to eliminate key pain points in large-scale metabolomics research. COMETS Analytics includes an online tool, but its cloud computing and technology are the supporting rather than the leading actors in this script. The COMETS team identified the need to standardize diverse and inconsistent metabolomics and covariate data and models across its many participating cohort studies, and then developed a flexible tool that gave its member studies choices about how they wanted to meet the consortium's analytical requirements. Different specialties will have different specific research needs and will probably continue to use and develop an array of diverse analytical and technical solutions for their projects. COMETS Analytics shows how important, and enabling, the upstream attention to data standards and data consistency is to producing high-quality metabolomics, consortia-based, and large-scale epidemiology research.}, } @article {pmid34435101, year = {2021}, author = {Edu, AS and Agoyi, M and Agozie, D}, title = {Digital security vulnerabilities and threats implications for financial institutions deploying digital technology platforms and application: FMEA and FTOPSIS analysis.}, journal = {PeerJ. Computer science}, volume = {7}, number = {}, pages = {e658}, pmid = {34435101}, issn = {2376-5992}, abstract = {Digital disruptions have led to the integration of applications, platforms, and infrastructure. They assist in business operations, promoting open digital collaborations, and perhaps even the integration of the Internet of Things (IoTs), Big Data Analytics, and Cloud Computing to support data sourcing, data analytics, and storage synchronously on a single platform. Notwithstanding the benefits derived from digital technology integration (including IoTs, Big Data Analytics, and Cloud Computing), digital vulnerabilities and threats have become a more significant concern for users. We addressed these challenges from an information systems perspective and have noted that more research is needed to identify potential vulnerabilities and threats affecting the integration of IoTs, BDA and CC for data management.
We conducted a step-by-step analysis of the potential vulnerabilities and threats affecting the integration of IoTs, Big Data Analytics, and Cloud Computing for data management. We combined multi-dimensional analysis, Failure Mode Effect Analysis, and the Fuzzy Technique for Order of Preference by Similarity to Ideal Solution to evaluate and rank the potential vulnerabilities and threats. We surveyed 234 security experts from the banking industry with adequate knowledge of IoTs, Big Data Analytics, and Cloud Computing. Based on the closeness of the coefficients, we determined that insufficient use of backup electric generators, firewall protection failures, and a lack of information security audits are high-ranking vulnerabilities and threats affecting integration. This study is an extension of discussions on the integration of digital applications and platforms for data management and the pervasive vulnerabilities and threats arising from it. A detailed review and classification of these threats and vulnerabilities are vital for sustaining businesses' digital integration.}, } @article {pmid34432855, year = {2021}, author = {Mohd Romlay, MR and Mohd Ibrahim, A and Toha, SF and De Wilde, P and Venkat, I}, title = {Novel CE-CBCE feature extraction method for object classification using a low-density LiDAR point cloud.}, journal = {PloS one}, volume = {16}, number = {8}, pages = {e0256665}, pmid = {34432855}, issn = {1932-6203}, mesh = {*Algorithms ; Cluster Analysis ; Humans ; *Lasers ; Robotics ; Software ; }, abstract = {Low-end LiDAR sensors provide an alternative for depth measurement and object recognition on lightweight devices. However, due to low computing capacity, complicated algorithms cannot be executed on the device, and the sparse information further limits the features available for extraction. Therefore, a classification method is required that can receive sparse input while providing ample leverage for the classification process to accurately differentiate objects within a limited computing capability. To achieve reliable feature extraction from a sparse LiDAR point cloud, this paper proposes a novel Clustered Extraction and Centroid Based Clustered Extraction (CE-CBCE) method for feature extraction followed by a convolutional neural network (CNN) object classifier. The integration of the CE-CBCE and CNN methods enables us to utilize lightweight actuated LiDAR input and provides a low-computation means of classification while maintaining accurate detection. On genuine LiDAR data, the proposed method achieves a reliable accuracy of 97%.}, } @article {pmid34430081, year = {2021}, author = {Zhao, J and Yu, L and Liu, H and Huang, H and Wang, J and Gong, P}, title = {Towards an open and synergistic framework for mapping global land cover.}, journal = {PeerJ}, volume = {9}, number = {}, pages = {e11877}, pmid = {34430081}, issn = {2167-8359}, abstract = {Global land-cover datasets are key sources of information for understanding the complex interactions between human activities and global change. They are also among the most critical variables for climate change studies. Over time, the spatial resolution of land cover maps has increased from the kilometer scale to the 10-m scale. Single-type historical land cover datasets, including for forests, water, and impervious surfaces, have also been developed in recent years.
In this study, we present an open and synergistic framework to produce a global land cover dataset that combines supervised land cover classification and aggregation of multiple existing thematic land cover maps on the Google Earth Engine (GEE) cloud computing platform. On the basis of this method of classification and mosaicking, we derived a global land cover dataset for 6 years over a time span of 25 years. The overall accuracies of the six maps were around 75%, and the accuracy for change area detection was over 70%. Our product also showed good similarity with the FAO and existing land cover maps.}, } @article {pmid34425749, year = {2021}, author = {Reddy, S and Hung, LH and Sala-Torra, O and Radich, JP and Yeung, CC and Yeung, KY}, title = {A graphical, interactive and GPU-enabled workflow to process long-read sequencing data.}, journal = {BMC genomics}, volume = {22}, number = {1}, pages = {626}, pmid = {34425749}, issn = {1471-2164}, support = {UG1 CA233338/NH/NIH HHS/United States ; R01 GM126019/GM/NIGMS NIH HHS/United States ; Hyundai Hope on Wheel Scholars Hope Grant//Hyundai/ ; Young Investigator Award//National Comprehensive Cancer Network/ ; R01 CA175008/NH/NIH HHS/United States ; R01GM126019/NH/NIH HHS/United States ; }, mesh = {*Computational Biology ; Reproducibility of Results ; Sequence Analysis ; *Software ; Workflow ; }, abstract = {BACKGROUND: Long-read sequencing has great promise in enabling portable, rapid molecular-assisted cancer diagnoses. A key challenge in democratizing long-read sequencing technology in the biomedical and clinical community is the lack of graphical bioinformatics software tools that can efficiently process raw nanopore reads and support graphical output and interactive visualizations for interpreting results. Another obstacle is that high-performance software tools for long-read sequencing data analyses often leverage graphics processing units (GPUs), which are challenging and time-consuming to configure, especially on the cloud.

RESULTS: We present a graphical cloud-enabled workflow for fast, interactive analysis of nanopore sequencing data using GPUs. Users customize parameters, monitor execution and visualize results through an accessible graphical interface. The workflow and its components are completely containerized to ensure reproducibility and facilitate installation of the GPU-enabled software. We also provide an Amazon Machine Image (AMI) with all software and drivers pre-installed for GPU computing on the cloud. Most importantly, we demonstrate the potential of applying our software tools to reduce the turnaround time of cancer diagnostics by generating blood cancer (NB4, K562, ME1, 238 MV4;11) cell line Nanopore data using the Flongle adapter. We observe a 29x speedup and a 93x reduction in costs for the rate-limiting basecalling step in the analysis of blood cancer cell line data.

CONCLUSIONS: Our interactive and efficient software tools will make analyses of Nanopore data using GPU and cloud computing accessible to biomedical and clinical scientists, thus facilitating the adoption of cost-effective, fast, portable and real-time long-read sequencing.}, } @article {pmid34424931, year = {2021}, author = {Zhao, Y and Sazlina, SG and Rokhani, FZ and Su, J and Chew, BH}, title = {The expectations and acceptability of a smart nursing home model among Chinese elderly people: A mixed methods study protocol.}, journal = {PloS one}, volume = {16}, number = {8}, pages = {e0255865}, pmid = {34424931}, issn = {1932-6203}, mesh = {Aged ; China ; Family/psychology ; Female ; Humans ; Interviews as Topic ; Male ; Middle Aged ; *Models, Nursing ; *Nursing Homes/standards ; Surveys and Questionnaires ; }, abstract = {Nursing homes integrated with smart information such as the Internet of Things, cloud computing, artificial intelligence, and digital health could not only improve the quality of care but also benefit residents and health professionals by providing effective care and efficient medical services. However, a clear concept of a smart nursing home is lacking, and the expectations and acceptability from the perspectives of elderly people and their family members remain unclear. In addition, instruments to measure the expectations and acceptability of a smart nursing home are lacking. The study aims to explore and determine the levels of these expectations and acceptability and the associated sociodemographic factors. This exploratory sequential mixed methods study comprises a qualitative study which will be conducted through a semi-structured interview to explore the expectations and acceptability of a smart nursing home among Chinese elderly people and their family members (Phase I). Next, a questionnaire will be developed and validated based on the results of the qualitative study in Phase I and a preceding scoping review on smart nursing homes by the same authors (Phase II). Lastly, a nationwide survey will be carried out to examine the levels of expectations and acceptability and the sociodemographic factors associated with the different categories of expectations and acceptability (Phase III). With a better understanding of Chinese elderly people's expectations and acceptability of smart technologies in nursing homes, a feasible smart nursing home model that incorporates appropriate technologies and integrates needed medical services and business concepts could be formulated and tested as a solution for the rapidly ageing societies in many developed and developing countries.}, } @article {pmid36713097, year = {2021}, author = {Roffi, M and Casadei, B and Gouillard, C and Nambatingué, N and Daval, G and Bardinet, I and Priori, SG}, title = {Digital transformation of major scientific meetings induced by the COVID-19 pandemic: insights from the ESC 2020 annual congress.}, journal = {European heart journal. Digital health}, volume = {2}, number = {4}, pages = {704-712}, doi = {10.1093/ehjdh/ztab076}, pmid = {36713097}, issn = {2634-3916}, support = {CH/12/3/29609/BHF_/British Heart Foundation/United Kingdom ; }, abstract = {As a consequence of the COVID-19 pandemic, the European Society of Cardiology (ESC) was forced to pivot the scientific programme of the ESC Congress 2021 into a totally new format for online consumption, The Digital Experience.
A variety of new suppliers were involved, including experts in TV studios, cloud infrastructure, online platforms, video management, and online analytics. An information technology platform able to support hundreds of thousands of simultaneous connections was built, and cloud computing technologies were put in place to help scale the resources up and down as needed for the high number of users at peak times. The video management system was characterized by multiple layers of security and redundancy and offered the same fluidity, albeit at a different resolution, to all users independently of the performance of their internet connection. The event, free for all users, was an undisputed success, both from a scientific/educational and from a digital technology perspective. The number of registrations increased almost fourfold when compared with the 2019 record-breaking edition in Paris, with a greater proportion of younger and female participants as well as of participants from low- and middle-income countries. No major technical failures were encountered. For the first time in history, attendees from all around the globe had the same real-time access to the world's most popular cardiovascular conference.}, } @article {pmid34416827, year = {2021}, author = {Tahmasebi, A and Qu, E and Sevrukov, A and Liu, JB and Wang, S and Lyshchik, A and Yu, J and Eisenbrey, JR}, title = {Assessment of Axillary Lymph Nodes for Metastasis on Ultrasound Using Artificial Intelligence.}, journal = {Ultrasonic imaging}, volume = {43}, number = {6}, pages = {329-336}, doi = {10.1177/01617346211035315}, pmid = {34416827}, issn = {1096-0910}, mesh = {*Artificial Intelligence ; Axilla ; *Breast Neoplasms/diagnostic imaging ; Female ; Humans ; Lymph Nodes/diagnostic imaging ; Lymphatic Metastasis ; Sensitivity and Specificity ; }, abstract = {The purpose of this study was to evaluate an artificial intelligence (AI) system for the classification of axillary lymph nodes on ultrasound compared to radiologists. Ultrasound images of 317 axillary lymph nodes from patients referred for ultrasound-guided fine needle aspiration or core needle biopsy and corresponding pathology findings were collected. Lymph nodes were classified into benign and malignant groups, with the histopathological result serving as the reference. Google Cloud AutoML Vision (Mountain View, CA) was used for AI image classification. Three experienced radiologists also classified the images and gave a level of suspicion score (1-5). To test the accuracy of the AI, an external testing dataset of 64 images from 64 independent patients was evaluated by three AI models and the three readers. The diagnostic performance of the AI and the human readers was then quantified using receiver operating characteristic curves. In the complete set of 317 images, AutoML achieved a sensitivity of 77.1%, positive predictive value (PPV) of 77.1%, and an area under the precision recall curve of 0.78, while the three radiologists showed a sensitivity of 87.8% ± 8.5%, specificity of 50.3% ± 16.4%, PPV of 61.1% ± 5.4%, negative predictive value (NPV) of 84.1% ± 6.6%, and accuracy of 67.7% ± 5.7%. In the three external independent test sets, AI and human readers achieved sensitivity of 74.0% ± 0.14% versus 89.9% ± 0.06% (p = .25), specificity of 64.4% ± 0.11% versus 50.1% ± 0.20% (p = .22), PPV of 68.3% ± 0.04% versus 65.4% ± 0.07% (p = .50), NPV of 72.6% ± 0.11% versus 82.1% ± 0.08% (p = .33), and accuracy of 69.5% ± 0.06% versus 70.1% ± 0.07% (p = .90), respectively.
These preliminary results indicate that the AI has performance comparable to that of trained radiologists and could be used to predict the presence of metastasis in ultrasound images of axillary lymph nodes.}, } @article {pmid34411131, year = {2021}, author = {Khashan, E and Eldesouky, A and Elghamrawy, S}, title = {An adaptive spark-based framework for querying large-scale NoSQL and relational databases.}, journal = {PloS one}, volume = {16}, number = {8}, pages = {e0255562}, pmid = {34411131}, issn = {1932-6203}, mesh = {*Algorithms ; Cloud Computing/*statistics & numerical data ; Data Management/*methods ; Database Management Systems/*standards ; *Databases, Factual ; Information Storage and Retrieval/*statistics & numerical data ; *Software ; }, abstract = {The growing popularity of big data analysis and cloud computing has created new big data management standards. Sometimes, programmers may interact with a number of heterogeneous data stores depending on the information they are responsible for: SQL and NoSQL data stores. Interacting with heterogeneous data models via numerous APIs and query languages imposes challenging tasks on multi-data processing developers. Indeed, complex queries concerning homogeneous data structures cannot currently be performed in a declarative manner when found in single data storage applications and therefore require additional development efforts. Many models have been presented to address complex queries via multistore applications. Some of these models implement a unified and fast model, while others are not efficient enough to solve this type of complex database query. This paper provides CQNS, an automated, fast, and easy-to-use unified architecture to solve simple and complex SQL and NoSQL queries over heterogeneous data stores. The proposed framework can be used in cloud environments or for any big data application to automatically help developers manage basic and complicated database queries. CQNS consists of three layers: a matching selector layer, a processing layer, and a query execution layer. The matching selector layer is the heart of this architecture, in which five of the user queries are examined to determine whether they match another five queries stored in a single engine within the architecture's library. This is achieved through a proposed algorithm that directs each query to the right SQL or NoSQL database engine. Furthermore, CQNS works with many NoSQL databases, such as MongoDB, Cassandra, Riak, CouchDB, and Neo4j. This paper presents a Spark-based framework that can handle both SQL and NoSQL databases. Four benchmark scenario datasets are used to evaluate the proposed CQNS for querying different NoSQL databases in terms of optimization process performance and query execution time. The results show that CQNS achieves the best latency and throughput among the compared systems.}, } @article {pmid36567694, year = {2021}, author = {Singh, M and Singh, BB and Singh, R and Upendra, B and Kaur, R and Gill, SS and Biswas, MS}, title = {Quantifying COVID-19 enforced global changes in atmospheric pollutants using cloud computing based remote sensing.}, journal = {Remote sensing applications : society and environment}, volume = {22}, number = {}, pages = {100489}, pmid = {36567694}, issn = {2352-9385}, abstract = {Global lockdowns in response to the COVID-19 pandemic have led to changes in anthropogenic activities, resulting in perceivable air quality improvements.
Although several recent studies have analyzed these changes over different regions of the globe, these analyses have been constrained by the use of station-based data, which is mostly limited to metropolitan cities. Moreover, quantifiable changes have been reported only for developed and developing regions, leaving out poorer economies (e.g., Africa) owing to the shortage of in-situ data. Using a comprehensive set of high spatiotemporal resolution satellite and merged products of air pollutants, we analyze the air quality across the globe and quantify the improvement resulting from the suppressed anthropogenic activity during the lockdowns. In particular, we focus on megacities, capitals, and cities with high standards of living to make the quantitative assessment. Our results offer valuable insights into the spatial distribution of changes in air pollutants due to COVID-19 enforced lockdowns. Statistically significant reductions are observed over megacities, with mean reductions of 19.74%, 7.38%, and 49.9% in nitrogen dioxide (NO2), aerosol optical depth (AOD), and PM2.5 concentrations, respectively. Google Earth Engine-empowered, cloud computing-based remote sensing is used, and the results provide a testbed for climate sensitivity experiments and the validation of chemistry-climate models. Additionally, Google Earth Engine-based apps have been developed to visualize the changes in real time.}, } @article {pmid35782189, year = {2021}, author = {Liu, J and Miao, F and Yin, L and Pang, Z and Li, Y}, title = {A Noncontact Ballistocardiography-Based IoMT System for Cardiopulmonary Health Monitoring of Discharged COVID-19 Patients.}, journal = {IEEE internet of things journal}, volume = {8}, number = {21}, pages = {15807-15817}, pmid = {35782189}, issn = {2327-4662}, abstract = {We developed a ballistocardiography (BCG)-based Internet-of-Medical-Things (IoMT) system for remote monitoring of cardiopulmonary health. The system is composed of a BCG sensor, an edge node, and a cloud platform. To improve computational efficiency and system stability, the system adopted collaborative computing between the edge nodes and the cloud platform. The edge nodes undertake signal processing tasks, namely approximate entropy for signal quality assessment, a lifting wavelet scheme for separating the BCG and respiration signals, and lightweight BCG and respiration peak detection. Heart rate variability (HRV) analysis, respiratory rate variability (RRV) analysis, and other intelligent computing are performed on the cloud platform. In experiments with 25 participants, the proposed method achieved a mean absolute error (MAE)±standard deviation of absolute error (SDAE) of 9.6±8.2 ms for heartbeat interval detection, and an MAE±SDAE of 22.4±31.1 ms for respiration interval detection. To study the recovery of cardiopulmonary function in patients with coronavirus disease 2019 (COVID-19), this study recruited 186 discharged patients with COVID-19 and 186 control volunteers. The results indicate that the recovery performance of the respiratory rhythm is better than that of the heart rhythm among discharged patients with COVID-19. This reminds patients to be aware of the risk of cardiovascular disease after recovering from COVID-19.
Therefore, our remote monitoring system has the ability to play a major role in the follow-up and management of discharged patients with COVID-19.}, } @article {pmid36299497, year = {2021}, author = {Jensen, TL and Hooper, WF and Cherikh, SR and Goll, JB}, title = {RP-REP Ribosomal Profiling Reports: an open-source cloud-enabled framework for reproducible ribosomal profiling data processing, analysis, and result reporting.}, journal = {F1000Research}, volume = {10}, number = {}, pages = {143}, pmid = {36299497}, issn = {2046-1402}, abstract = {Ribosomal profiling is an emerging experimental technology to measure protein synthesis by sequencing short mRNA fragments undergoing translation in ribosomes. Applied on the genome-wide scale, this is a powerful tool to profile global protein synthesis within cell populations of interest. Such information can be utilized for biomarker discovery and detection of treatment-responsive genes. However, analysis of ribosomal profiling data requires careful preprocessing to reduce the impact of artifacts and dedicated statistical methods for visualizing and modeling the high-dimensional discrete read count data. Here we present Ribosomal Profiling Reports (RP-REP), a new open-source cloud-enabled software that allows users to execute start-to-end gene-level ribosomal profiling and RNA-Seq analysis on a pre-configured Amazon Virtual Machine Image (AMI) hosted on AWS or on the user's own Ubuntu Linux server. The software works with FASTQ files stored locally, on AWS S3, or at the Sequence Read Archive (SRA). RP-REP automatically executes a series of customizable steps including filtering of contaminant RNA, enrichment of true ribosomal footprints, reference alignment and gene translation quantification, gene body coverage, CRAM compression, reference alignment QC, data normalization, multivariate data visualization, identification of differentially translated genes, and generation of heatmaps, co-translated gene clusters, enriched pathways, and other custom visualizations. RP-REP provides functionality to contrast RNA-Seq and ribosomal profiling results, and calculates translational efficiency per gene. The software outputs a PDF report and publication-ready table and figure files. As a use case, we provide RP-REP results for a dengue virus study that tested cytosol and endoplasmic reticulum cellular fractions of human Huh7 cells pre-infection and at 6 h, 12 h, 24 h, and 40 h post-infection. Case study results, Ubuntu installation scripts, and the most recent RP-REP source code are accessible at GitHub. The cloud-ready AMI is available at AWS (AMI ID: RPREP RSEQREP (Ribosome Profiling and RNA-Seq Reports) v2.1 (ami-00b92f52d763145d3)).}, } @article {pmid36939746, year = {2021}, author = {Zhang, G and Zhang, Y and Jin, J}, title = {The Ultrafast and Accurate Mapping Algorithm FANSe3: Mapping a Human Whole-Genome Sequencing Dataset Within 30 Minutes.}, journal = {Phenomics (Cham, Switzerland)}, volume = {1}, number = {1}, pages = {22-30}, pmid = {36939746}, issn = {2730-5848}, abstract = {Aligning billions of reads generated by next-generation sequencing (NGS) to reference sequences, termed "mapping", is a time-consuming and computationally intensive process in most NGS applications. A fast, accurate, and robust mapping algorithm is highly needed.
Therefore, we developed the FANSe3 mapping algorithm, which can map a 30× human whole-genome sequencing (WGS) dataset within 30 min, a 50× human whole-exome sequencing (WES) dataset within 30 s, and a typical mRNA-seq dataset within seconds on a single server node without the need for any hardware acceleration feature. Like its predecessor FANSe2, FANSe3 can keep its error rate as low as 10[-9] in most cases, which makes it more robust than the Burrows-Wheeler transform-based algorithms. Error allowance hardly affected the identification of a driver somatic mutation in clinically relevant WGS data and provided robust gene expression profiles regardless of the parameter settings and sequencer used. The novel algorithm, designed for high-performance cloud-computing infrastructures, will break the bottleneck of speed and accuracy in NGS data analysis and promote NGS applications in various fields. The FANSe3 algorithm can be downloaded from the website: http://www.chi-biotech.com/fanse3/.}, } @article {pmid35782175, year = {2021}, author = {Tai, Y and Gao, B and Li, Q and Yu, Z and Zhu, C and Chang, V}, title = {Trustworthy and Intelligent COVID-19 Diagnostic IoMT Through XR and Deep-Learning-Based Clinic Data Access.}, journal = {IEEE internet of things journal}, volume = {8}, number = {21}, pages = {15965-15976}, pmid = {35782175}, issn = {2327-4662}, abstract = {This article presents a novel extended reality (XR) and deep-learning-based Internet-of-Medical-Things (IoMT) solution for COVID-19 telemedicine diagnostics, which systematically combines virtual reality/augmented reality (AR) remote surgical plan/rehearse hardware, customized 5G cloud computing, and deep learning algorithms to provide real-time COVID-19 treatment scheme clues. Compared to existing perception therapy techniques, our new technique can significantly improve performance and security. The system collected 25 types of clinical data from the 347 positive and 2270 negative COVID-19 patients in the Red Zone via 5G transmission. After that, a novel auxiliary classifier generative adversarial network-based intelligent prediction algorithm is used to train the new COVID-19 prediction model. Furthermore, the Copycat network is employed for model stealing and attack testing of the IoMT to improve its security performance. To simplify the user interface and achieve an excellent user experience, we combined the Red Zone's guiding images with the Green Zone's view through AR navigation cues delivered over 5G. The XR surgical plan/rehearse framework is designed to include all requisite COVID-19 surgical details and was developed with a real-time response guaranteed. The accuracy, recall, F1-score, and area under the ROC curve (AUC) of our new IoMT were 0.92, 0.98, 0.95, and 0.98, respectively, outperforming existing perception techniques with significantly higher accuracy. The model stealing also performed well, with the Copycat AUC of 0.90 only slightly lower than that of the original model.
This study suggests a new framework for COVID-19 diagnostic integration and opens new research on the integration of XR and deep learning for IoMT implementation.}, } @article {pmid36082106, year = {2021}, author = {Pipia, L and Amin, E and Belda, S and Salinero-Delgado, M and Verrelst, J}, title = {Green LAI Mapping and Cloud Gap-Filling Using Gaussian Process Regression in Google Earth Engine.}, journal = {Remote sensing}, volume = {13}, number = {3}, pages = {403}, pmid = {36082106}, issn = {2072-4292}, support = {755617/ERC_/European Research Council/International ; }, abstract = {For the last decade, Gaussian process regression (GPR) has proved to be a competitive machine learning regression algorithm for Earth observation applications, with attractive unique properties such as band relevance ranking and uncertainty estimates. More recently, GPR has also proved to be a proficient time series processor for filling gaps in optical imagery, typically caused by cloud cover. This makes GPR perfectly suited for large-scale spatiotemporal processing of satellite imagery into cloud-free products of biophysical variables. With the advent of the Google Earth Engine (GEE) cloud platform, new opportunities emerged to process local-to-planetary scale satellite data using advanced machine learning techniques and convert them into gap-filled vegetation properties products. However, GPR is not yet part of the GEE ecosystem. To circumvent this limitation, this work proposes a general adaptation of the GPR formulation to a parallel processing framework and its integration into GEE. To demonstrate the functioning and utility of the developed workflow, a GPR model predicting green leaf area index (LAI G) from Sentinel-2 imagery was imported. Although running this GPR model in GEE allows any corner of the world to be mapped into LAI G at a resolution of 20 m, here we show some demonstration cases over western Europe with zoom-ins over Spain. Thanks to the computational power of GEE, the mapping takes place on-the-fly. Additionally, a GPR-based gap-filling strategy based on pre-optimized kernel hyperparameters is put forward for the generation of multi-orbit cloud-free LAI G maps with an unprecedented level of detail, and the extraction of regularly sampled LAI G time series at a pixel level. The ability to plug a locally trained GPR model into the GEE framework and its instant processing opens up a new paradigm of remote sensing image processing.}, } @article {pmid36654308, year = {2021}, author = {Chen, X and Cheng, B and Li, Z and Nie, X and Yu, N and Yung, MH and Peng, X}, title = {Experimental cryptographic verification for near-term quantum cloud computing.}, journal = {Science bulletin}, volume = {66}, number = {1}, pages = {23-28}, doi = {10.1016/j.scib.2020.08.013}, pmid = {36654308}, issn = {2095-9281}, abstract = {An important task for quantum cloud computing is to make sure that there is a real quantum computer running, instead of a classical simulation. Here we explore the applicability of a cryptographic verification scheme for verifying quantum cloud computing. We provided a theoretical extension and implemented the scheme on a 5-qubit NMR quantum processor in the laboratory and on the 5-qubit and 16-qubit processors of the IBM quantum cloud. We found that the experimental results of the NMR processor can be verified by the scheme with about 1.4% error, after noise compensation by standard techniques.
However, the fidelity of the IBM quantum cloud is currently too low to pass the test (about 42% error). This verification scheme shall become practical when servers claim to offer quantum-computing resources that can achieve quantum supremacy.}, } @article {pmid36081683, year = {2021}, author = {Berger, K and Caicedo, JPR and Martino, L and Wocher, M and Hank, T and Verrelst, J}, title = {A Survey of Active Learning for Quantifying Vegetation Traits from Terrestrial Earth Observation Data.}, journal = {Remote sensing}, volume = {13}, number = {2}, pages = {287}, pmid = {36081683}, issn = {2072-4292}, support = {755617/ERC_/European Research Council/International ; }, abstract = {The current exponential increase of spatiotemporally explicit data streams from satellite-based Earth observation missions offers promising opportunities for global vegetation monitoring. Intelligent sampling through active learning (AL) heuristics provides a pathway for fast inference of essential vegetation variables by means of hybrid retrieval approaches, i.e., machine learning regression algorithms trained by radiative transfer model (RTM) simulations. In this study we summarize AL theory and perform a brief systematic literature survey of AL heuristics used in the context of Earth observation regression problems over terrestrial targets. Across all relevant studies it appeared that: (i) the retrieval accuracy of models trained on AL-optimized data sets outperformed that of models trained on large randomly sampled data sets, and (ii) the Euclidean distance-based (EBD) diversity method tends to be the most efficient AL technique in terms of accuracy and computational demand. Additionally, a case study is presented based on experimental data employing both uncertainty and diversity AL criteria. Here, a training database simulated by the PROSAIL-PRO canopy RTM is used to demonstrate the benefit of AL techniques for the estimation of total leaf carotenoid content (Cxc) and leaf water content (Cw). Gaussian process regression (GPR) was incorporated to minimize and optimize the training data set with AL. Training the GPR algorithm on optimally AL-sampled data sets led to improved variable retrievals compared to training on full data pools, which is further demonstrated in a mapping example. From these findings we can recommend the use of AL-based sub-sampling procedures to select the most informative samples out of large training data pools. This will not only optimize regression accuracy due to the exclusion of redundant information, but also speed up processing time and reduce the final model size of kernel-based machine learning regression algorithms, such as GPR. With this study we want to encourage further testing and implementation of AL sampling methods for hybrid retrieval workflows.
AL can contribute to the solution of regression problems within the framework of operational vegetation monitoring using satellite imaging spectroscopy data, and may strongly facilitate data processing for cloud-computing platforms.}, } @article {pmid36504549, year = {2021}, author = {Hanke, M and Pestilli, F and Wagner, AS and Markiewicz, CJ and Poline, JB and Halchenko, YO}, title = {In defense of decentralized research data management.}, journal = {Neuroforum}, volume = {27}, number = {1}, pages = {17-25}, pmid = {36504549}, issn = {2363-7013}, support = {R24 MH117179/MH/NIMH NIH HHS/United States ; R01 MH083320/MH/NIMH NIH HHS/United States ; RF1 MH120021/MH/NIMH NIH HHS/United States ; P41 EB019936/EB/NIBIB NIH HHS/United States ; R01 MH096906/MH/NIMH NIH HHS/United States ; }, abstract = {Decentralized research data management (dRDM) systems handle digital research objects across participating nodes without critically relying on central services. We present four perspectives in defense of dRDM, illustrating that, in contrast to centralized or federated research data management solutions, a dRDM system based on heterogeneous but interoperable components can offer a sustainable, resilient, inclusive, and adaptive infrastructure for scientific stakeholders: An individual scientist or laboratory, a research institute, a domain data archive or cloud computing platform, and a collaborative multisite consortium. All perspectives share the use of a common, self-contained, portable data structure as an abstraction from current technology and service choices. In conjunction, the four perspectives review how varying requirements of independent scientific stakeholders can be addressed by a scalable, uniform dRDM solution and present a working system as an exemplary implementation.}, } @article {pmid35382513, year = {2020}, author = {Adedolapo, O and Huichen, Y and Avishek, B and William, H and Dan, A and Mohammed, T}, title = {Feature Selection for Learning to Predict Outcomes of Compute Cluster Jobs with Application to Decision Support.}, journal = {Proceedings. International Conference on Computational Science and Computational Intelligence}, volume = {2020}, number = {}, pages = {1231-1236}, pmid = {35382513}, issn = {2769-5654}, support = {P20 GM113109/GM/NIGMS NIH HHS/United States ; }, abstract = {We present a machine learning framework and a new test bed for data mining from the Slurm Workload Manager for high-performance computing (HPC) clusters. The focus was to find a method for selecting features to support decisions: helping users decide whether to resubmit failed jobs with boosted CPU and memory allocations or migrate them to a computing cloud. This task was cast as both supervised classification and regression learning, specifically, sequential problem solving suitable for reinforcement learning. Selecting relevant features can improve training accuracy, reduce training time, and produce a more comprehensible model, with an intelligent system that can explain predictions and inferences. We present a supervised learning model trained on a Simple Linux Utility for Resource Management (Slurm) data set of HPC jobs using three different techniques for selecting features: linear regression, lasso, and ridge regression. Our data set represented both HPC jobs that failed and those that succeeded, so our model was reliable, less likely to overfit, and generalizable. Our model achieved an R[2] of 95% with 99% accuracy. 
We identified five predictors for both CPU and memory properties.}, } @article {pmid35983015, year = {2020}, author = {Rizzo, JR and Feng, C and Riewpaiboon, W and Mongkolwat, P}, title = {A Low-Vision Navigation Platform for Economies in Transition Countries.}, journal = {Proceedings IEEE World Congress on Services (SERVICES). IEEE World Congress on Services}, volume = {2020}, number = {}, pages = {1-3}, pmid = {35983015}, issn = {2642-939X}, support = {R21 EY033689/EY/NEI NIH HHS/United States ; }, abstract = {An ability to move freely, when wanted, is an essential activity for healthy living. Visually impaired and completely blind persons encounter many disadvantages in their day-to-day activities, including performing work-related tasks. They are at risk of mobility losses, illness, debility, social isolation, and premature mortality. A novel wearable device and computing platform called VIS[4]ION is reducing the disadvantage gaps and raising living standards for the visually challenged. It provides personal mobility navigational services that serve as a customizable, human-in-the-loop, sensing-to-feedback platform to deliver functional assistance. The platform is configured as a wearable that provides on-board microcomputers, human-machine interfaces, and sensory augmentation. Mobile edge computing enhances functionality as more services are unleashed with the computational gains. The meta-level goal is to support spatial cognition, personal freedom, and activities, and to promote health and wellbeing. VIS[4]ION can be conceptualized as the dovetailing of two thrusts: an on-person navigational and computing device and a multimodal functional aid providing microservices through the cloud. The device has on-board wireless capabilities connected through Wi-Fi or 4/5G. The cloud-based microservices reduce hardware and power requirements while allowing existing and new services to be enhanced and added, such as loading new maps and real-time communication via haptic or audio signals. This technology can be made available and affordable in the economies of transition countries.}, } @article {pmid35582325, year = {2022}, author = {Yassine, A and Hossain, MS}, title = {COVID-19 Networking Demand: An Auction-Based Mechanism for Automated Selection of Edge Computing Services.}, journal = {IEEE transactions on network science and engineering}, volume = {9}, number = {1}, pages = {308-318}, pmid = {35582325}, issn = {2327-4697}, abstract = {Network and cloud service providers are facing an unprecedented challenge to meet the demand of end-users during the COVID-19 pandemic. Currently, billions of people around the world are ordered to stay at home and use remote connection technologies to prevent the spread of the disease. The COVID-19 crisis brought a new reality to network service providers that will eventually accelerate the deployment of edge computing resources to attract the massive influx of users' traffic. The user can elect to procure its resource needs from any edge computing provider based on a variety of attributes such as price and quality. The main challenge for the user is how to choose between the price and multiple quality-of-service deals when such offerings are changing continually. This problem falls under multi-attribute decision-making. This paper investigates and proposes a novel auction mechanism by which network service brokers would be able to automate the selection of edge computing offers to support their end-users.
We also propose a multi-attribute decision-making model that allows the broker to maximize its utility when several bids from edge-network providers are present. The evaluation and experimentation show the practicality and robustness of the proposed model.}, } @article {pmid35939281, year = {2020}, author = {Kaplan, M and Kneifel, C and Orlikowski, V and Dorff, J and Newton, M and Howard, A and Shinn, D and Bishawi, M and Chidyagwai, S and Balogh, P and Randles, A}, title = {Cloud Computing for COVID-19: Lessons Learned From Massively Parallel Models of Ventilator Splitting.}, journal = {Computing in science & engineering}, volume = {22}, number = {6}, pages = {37-47}, pmid = {35939281}, issn = {1521-9615}, abstract = {A patient-specific airflow simulation was developed to help address the pressing need for an expansion of the ventilator capacity in response to the COVID-19 pandemic. The computational model provides guidance regarding how to split a ventilator between two or more patients with differing respiratory physiologies. To address the need for fast deployment and identification of optimal patient-specific tuning, there was a need to simulate hundreds of millions of different clinically relevant parameter combinations in a short time. This task, driven by the dire circumstances, presented unique computational and research challenges. We present here the guiding principles and lessons learned as to how a large-scale and robust cloud instance was designed and deployed within 24 hours and 800 000 compute hours were utilized in a 72-hour period. We discuss the design choices to enable a quick turnaround of the model, execute the simulation, and create an intuitive and interactive interface.}, } @article {pmid34812355, year = {2020}, author = {Kolhar, M and Al-Turjman, F and Alameen, A and Abualhaj, MM}, title = {A Three Layered Decentralized IoT Biometric Architecture for City Lockdown During COVID-19 Outbreak.}, journal = {IEEE access : practical innovations, open solutions}, volume = {8}, number = {}, pages = {163608-163617}, pmid = {34812355}, issn = {2169-3536}, abstract = {In this article, we have built a prototype of a decentralized IoT-based biometric face detection framework for cities that are under lockdown during COVID-19 outbreaks. To impose restrictions on public movements, we have utilized face detection based on a three-layered edge computing architecture. We have built a multi-task cascading deep learning framework to recognize faces. We compared our face detection proposal with state-of-the-art methods on various benchmark datasets, such as FDDB and WIDER FACE. Furthermore, we have also conducted various experiments on latency and face detection load on the three-layer and cloud computing architectures. The results show that our proposal has an edge over the cloud computing architecture.}, } @article {pmid34976554, year = {2020}, author = {Hussain, AA and Bouachir, O and Al-Turjman, F and Aloqaily, M}, title = {AI Techniques for COVID-19.}, journal = {IEEE access : practical innovations, open solutions}, volume = {8}, number = {}, pages = {128776-128795}, pmid = {34976554}, issn = {2169-3536}, abstract = {The intent of Artificial Intelligence (AI) is to extend human capabilities. It is gaining a foothold in healthcare services, fueled by the growing availability of clinical data and the rapid progression of intelligent techniques.
Motivated by the need to highlight the importance of employing AI in battling the COVID-19 crisis, this survey summarizes the current state of AI applications in clinical services during the fight against COVID-19. Furthermore, we highlight the application of Big Data in understanding this virus. We also review various intelligent techniques and methods that can be applied to various types of pandemic-related medical information. We classify the existing AI techniques for clinical data analysis, including neural networks, classical SVM, and deep learning. Also, an emphasis has been placed on regions that utilize AI-oriented cloud computing in combating viruses similar to COVID-19. This survey is an attempt to benefit medical practitioners and medical researchers in overcoming the difficulties they face while handling COVID-19 big data. The investigated techniques put forth advances in medical data analysis with an accuracy of up to 90%. We conclude with a detailed discussion about how AI implementation can be a huge advantage in combating various similar viruses.}, } @article {pmid35662897, year = {2020}, author = {Shao, D and Kellogg, G and Mahony, S and Lai, W and Pugh, BF}, title = {PEGR: a management platform for ChIP-based next generation sequencing pipelines.}, journal = {PEARC20 : Practice and Experience in Advanced Research Computing 2020 : Catch the wave : July 27-31, 2020, Portland, Or Virtual Conference. Practice and Experience in Advanced Research Computing (Conference) (2020 : Online)}, volume = {2020}, number = {}, pages = {285-292}, pmid = {35662897}, support = {R01 ES013768/ES/NIEHS NIH HHS/United States ; R01 GM125722/GM/NIGMS NIH HHS/United States ; }, abstract = {There has been rapid development in genome sequencing, including high-throughput next-generation sequencing (NGS) technologies, automation in biological experiments, new bioinformatics tools, and the utilization of high-performance computing and cloud computing. ChIP-based NGS technologies, e.g., ChIP-seq and ChIP-exo, are widely used to detect the binding sites of DNA-interacting proteins in the genome and help us to have a deeper mechanistic understanding of genomic regulation. As sequencing data is generated at an unprecedented pace from ChIP-based NGS pipelines, there is an urgent need for a metadata management system. To meet this need, we developed the Platform for Eukaryotic Genomic Regulation (PEGR), a web service platform that logs metadata for samples and sequencing experiments, manages the data processing workflows, and provides reporting and visualization. PEGR links together people, samples, protocols, DNA sequencers, and bioinformatics computation. With the help of PEGR, scientists can have a more integrated understanding of the sequencing data and better understand the scientific mechanisms of genomic regulation. In this paper, we present the architecture and the major functionalities of PEGR. We also share our experience in developing this application and discuss future directions.}, } @article {pmid35615582, year = {2020}, author = {Choi, IK and Abeysinghe, E and Coulter, E and Marru, S and Pierce, M and Liu, X}, title = {TopPIC Gateway: A Web Gateway for Top-Down Mass Spectrometry Data Interpretation.}, journal = {PEARC20 : Practice and Experience in Advanced Research Computing 2020 : Catch the wave : July 27-31, 2020, Portland, Or Virtual Conference.
Practice and Experience in Advanced Research Computing (Conference) (2020 : Online)}, volume = {2020}, number = {}, pages = {461-464}, pmid = {35615582}, support = {R01 GM118470/GM/NIGMS NIH HHS/United States ; U54 AG065181/AG/NIA NIH HHS/United States ; }, abstract = {Top-down mass spectrometry-based proteomics has become the method of choice for identifying and quantifying intact proteoforms in biological samples. We present a web-based gateway for the TopPIC suite, a widely used software suite consisting of four tools for top-down mass spectrometry data interpretation: TopFD, TopPIC, TopMG, and TopDiff. The gateway enables the community to use a heterogeneous collection of computing resources that includes high-performance computing clusters at Indiana University and virtual clusters on XSEDE's Jetstream Cloud resource for top-down mass spectral data analysis using the TopPIC suite. The gateway will be a useful resource for proteomics researchers and students who have limited access to high-performance computing resources or who are not familiar with interacting with server-side supercomputers.}, } @article {pmid35098264, year = {2020}, author = {Sivagnanam, S and Gorman, W and Doherty, D and Neymotin, SA and Fang, S and Hovhannisyan, H and Lytton, WW and Dura-Bernal, S}, title = {Simulating Large-scale Models of Brain Neuronal Circuits using Google Cloud Platform.}, journal = {PEARC20 : Practice and Experience in Advanced Research Computing 2020 : Catch the wave : July 27-31, 2020, Portland, Or Virtual Conference. Practice and Experience in Advanced Research Computing (Conference) (2020 : Online)}, volume = {2020}, number = {}, pages = {505-509}, pmid = {35098264}, support = {R01 DC012947/DC/NIDCD NIH HHS/United States ; U01 EB017695/EB/NIBIB NIH HHS/United States ; U24 EB028998/EB/NIBIB NIH HHS/United States ; }, abstract = {Biophysically detailed modeling provides an unmatched method to integrate data from many disparate experimental studies, and to manipulate and explore with high precision the resulting brain circuit simulation. We developed a detailed model of the brain motor cortex circuits, simulating over 10,000 biophysically detailed neurons and 30 million synaptic connections. Optimization and evaluation of the cortical model parameters and responses were achieved via parameter exploration using grid search parameter sweeps and evolutionary algorithms. This involves running tens of thousands of simulations, requiring significant computational resources. This paper describes our experience in setting up and using Google Cloud Platform (GCP) with Slurm to run these large-scale simulations. 
We describe the best practices and solutions to the issues that arose during the process, and present preliminary results from running simulations on GCP.}, } @article {pmid35095187, year = {2018}, author = {Thorsen, TJ and Kato, S and Loeb, NG and Rose, FG}, title = {Observation-Based Decomposition of Radiative Perturbations and Radiative Kernels.}, journal = {Journal of climate}, volume = {31}, number = {24}, pages = {10039-10058}, pmid = {35095187}, issn = {0894-8755}, support = {/SCMD-EarthScienceSystem/Science Earth Science System NASA/United States ; }, abstract = {The Clouds and the Earth's Radiant Energy System (CERES)-partial radiative perturbation [PRP (CERES-PRP)] methodology applies partial-radiative-perturbation-like calculations to observational datasets to directly isolate the individual cloud, atmospheric, and surface property contributions to the variability of the radiation budget. The results of these calculations can further be used to construct radiative kernels. A suite of monthly mean observation-based inputs are used for the radiative transfer, including cloud properties from either the diurnally resolved passive-sensor-based CERES synoptic (SYN) data or the combination of the CloudSat cloud radar and Cloud-Aerosol Lidar and Infrared Pathfinder Satellite Observations (CALIPSO) lidar. The CloudSat/CALIPSO cloud profiles are incorporated via a clustering method that obtains monthly mean cloud properties suitable for accurate radiative transfer calculations. The computed fluxes are validated using the TOA fluxes observed by CERES. Applications of the CERES-PRP methodology are demonstrated by computing the individual contributions to the variability of the radiation budget over multiple years and by deriving water vapor radiative kernels. The calculations for the former are used to show that an approximately linear decomposition of the total flux anomalies is achieved. The observation-based water vapor kernels were used to investigate the accuracy of the GCM-based NCAR CAM3.0 water vapor kernel. Differences between our observation-based kernel and the NCAR one are marginally larger than those inferred by previous comparisons among different GCM kernels.}, } @article {pmid35095126, year = {2018}, author = {Zhao, G and Gao, H}, title = {Automatic correction of contaminated images for assessment of reservoir surface area dynamics.}, journal = {Geophysical research letters}, volume = {45}, number = {12}, pages = {6092-6099}, pmid = {35095126}, issn = {0094-8276}, support = {80NSSC17K0358/ImNASA/Intramural NASA/United States ; 80NSSC18K0939/ImNASA/Intramural NASA/United States ; }, abstract = {The potential of using Landsat for assessing long-term water surface dynamics of individual reservoirs at a global scale has been significantly hindered by contaminations from clouds, cloud shadows, and terrain shadows. A novel algorithm was developed towards the automatic correction of these contaminated image classifications. By applying this algorithm to the dataset by Pekel et al. (2016), time series of area values for 6817 global reservoirs (with an integrated capacity of 6099 km[3]) were generated from 1984 to 2015. The number of effective images that can be used in each time series has been improved by 81% on average. The long-term average area for these global reservoirs was corrected from 1.73×10[5] km[2] to 3.94×10[5] km[2]. The results were proven to be robust through validation using observations, synthetic data, and visual inspection. 
This continuous reservoir surface area dataset can benefit various applications at both continental and local scales.}, } @article {pmid36658912, year = {2018}, author = {Xin, T and Huang, S and Lu, S and Li, K and Luo, Z and Yin, Z and Li, J and Lu, D and Long, G and Zeng, B}, title = {NMRCloudQ: a quantum cloud experience on a nuclear magnetic resonance quantum computer.}, journal = {Science bulletin}, volume = {63}, number = {1}, pages = {17-23}, doi = {10.1016/j.scib.2017.12.022}, pmid = {36658912}, issn = {2095-9281}, abstract = {Cloud-based quantum computing is anticipated to be the most useful and reachable form for public users to experience the power of quantum computing. As an initial attempt, IBM Q launched an influential cloud service on a superconducting quantum processor in 2016, but no other platform has followed up yet. Here, we report our new cloud quantum computing service - NMRCloudQ (http://nmrcloudq.com/zh-hans/), where nuclear magnetic resonance, one of the pioneering platforms with mature techniques in experimental quantum computing, plays the role of implementing computing tasks. Our service provides a comprehensive software environment preconfigured with a list of quantum information processing packages, and aims to be freely accessible both to amateurs who wish to keep pace with this quantum era and to professionals who are interested in carrying out real quantum computing experiments in person. In the current version, four qubits are already usable, with an average single-qubit gate fidelity of 99.10% and a two-qubit fidelity of 97.15% in randomized benchmarking tests. Improved control precision as well as a new seven-qubit processor are also in preparation and will be available later.}, } @article {pmid35531371, year = {2018}, author = {Xu, H and Yu, W and Griffith, D and Golmie, N}, title = {A Survey on Industrial Internet of Things: A Cyber-Physical Systems Perspective.}, journal = {IEEE access : practical innovations, open solutions}, volume = {6}, number = {}, pages = {}, pmid = {35531371}, issn = {2169-3536}, support = {9999-NIST/ImNIST/Intramural NIST DOC/United States ; }, abstract = {The vision of Industry 4.0, otherwise known as the fourth industrial revolution, is the integration of massively deployed smart computing and network technologies in industrial production and manufacturing settings for the purposes of automation, reliability, and control, implicating the development of an Industrial Internet of Things (I-IoT). Specifically, I-IoT is devoted to adopting the Internet of Things (IoT) to enable the interconnection of anything, anywhere, and at any time in the manufacturing system context to improve productivity, efficiency, safety, and intelligence. As an emerging technology, I-IoT has distinct properties and requirements that distinguish it from consumer IoT, including the unique types of smart devices incorporated, network technologies and quality-of-service requirements, and strict needs for command and control. To more clearly understand the complexities of I-IoT and its distinct needs, and to present a unified assessment of the technology from a systems perspective, in this paper we comprehensively survey the body of existing research on I-IoT. Particularly, we first present the I-IoT architecture, I-IoT applications (i.e., factory automation (FA) and process automation (PA)) and their characteristics. We then consider existing research efforts from the three key systems aspects of control, networking and computing. 
Regarding control, we first categorize industrial control systems and then present recent and relevant research efforts. Next, considering networking, we propose a three-dimensional framework to explore the existing research space, and investigate the adoption of some representative networking technologies, including 5G, machine-to-machine (M2M) communication, and software defined networking (SDN). Similarly, concerning computing, we propose a second three-dimensional framework that explores the problem space of computing in I-IoT, and investigate the cloud, edge, and hybrid cloud and edge computing platforms. Finally, we outline particular challenges and future research needs in control, networking, and computing systems, as well as for the adoption of machine learning, in an I-IoT context.}, } @article {pmid34430067, year = {2017}, author = {Maabreh, M and Qolomany, B and Alsmadi, I and Gupta, A}, title = {Deep Learning-based MSMS Spectra Reduction in Support of Running Multiple Protein Search Engines on Cloud.}, journal = {Proceedings. IEEE International Conference on Bioinformatics and Biomedicine}, volume = {2017}, number = {}, pages = {1909-1914}, pmid = {34430067}, issn = {2156-1125}, support = {R15 GM120820/GM/NIGMS NIH HHS/United States ; }, abstract = {The diversity of the available protein search engines with respect to the utilized matching algorithms, the low overlap ratios among their results, and the disparity of their coverage encourage the proteomics community to utilize ensemble solutions combining different search engines. Advances in cloud computing technology and the availability of distributed processing clusters can also support this task. However, transferring data and combining results could be the major bottleneck in this case. The flood of billions of observed mass spectra, amounting to hundreds of gigabytes or potentially terabytes of data, could easily cause congestion, increase the risk of failure and poor performance, add computation cost, and waste available resources. Therefore, in this study, we propose a deep learning model to mitigate traffic over the cloud network and thus reduce the cost of cloud computing. The model, which depends on the top 50 intensities and their m/z values for each spectrum, removes any spectrum that is predicted not to pass the majority vote of the participating search engines. Our results, using three search engines (pFind, Comet, and X!Tandem) and four different datasets, are promising and support investment in deep learning to solve this type of big data problem.}, } @article {pmid36937228, year = {2017}, author = {Navas-Molina, JA and Hyde, ER and Sanders, J and Knight, R}, title = {The Microbiome and Big Data.}, journal = {Current opinion in systems biology}, volume = {4}, number = {}, pages = {92-96}, pmid = {36937228}, issn = {2452-3100}, support = {P01 DK078669/DK/NIDDK NIH HHS/United States ; R01 HG004872/HG/NHGRI NIH HHS/United States ; U01 HG004866/HG/NHGRI NIH HHS/United States ; U01 HG006537/HG/NHGRI NIH HHS/United States ; }, abstract = {Microbiome datasets have expanded rapidly in recent years. Advances in DNA sequencing, as well as the rise of shotgun metagenomics and metabolomics, are producing datasets that exceed the ability of researchers to analyze them on their personal computers. 
Here we describe what Big Data is in the context of microbiome research, how this data can be transformed into knowledge about microbes and their functions in their environments, and how the knowledge can be applied to move microbiome research forward. In particular, the development of new high-resolution tools to assess strain-level variability (moving away from OTUs), the advent of cloud computing and centralized analysis resources such as Qiita (for sequences) and GNPS (for mass spectrometry), and better methods for curating and describing "metadata" (contextual information about the sequence or chemical information) are rapidly assisting the use of microbiome data in fields ranging from human health to environmental studies.}, } @article {pmid34423340, year = {2016}, author = {Goonasekera, N and Lonie, A and Taylor, J and Afgan, E}, title = {CloudBridge: a Simple Cross-Cloud Python Library.}, journal = {Proceedings of XSEDE16 : Diversity, Big Data, and Science at Scale : July 17-21, 2016, Intercontinental Miami Hotel, Miami, Florida, USA. Conference on Extreme Science and Engineering Discovery Environment (5th : 2016 : Miami, Fla.)}, volume = {2016}, number = {}, pages = {}, doi = {10.1145/2949550.2949648}, pmid = {34423340}, support = {U01 CA184826/CA/NCI NIH HHS/United States ; U24 HG006620/HG/NHGRI NIH HHS/United States ; U41 HG006620/HG/NHGRI NIH HHS/United States ; }, abstract = {With clouds becoming a standard target for deploying applications, it is more important than ever to be able to seamlessly utilise resources and services from multiple providers. Proprietary vendor APIs make this challenging and lead to conditional code being written to accommodate various API differences, requiring application authors to deal with these complexities and to test their applications against each supported cloud. In this paper, we describe an open source Python library called CloudBridge that provides a simple, uniform, and extensible API for multiple clouds. The library defines a standard 'contract' that all supported providers must implement, and an extensive suite of conformance tests to ensure that any exposed behavior is uniform across cloud providers, thus allowing applications to confidently utilise any of the supported clouds without any cloud-specific code or testing.}, } @article {pmid34875801, year = {2011}, author = {O'Leary, MA and Kaufman, S}, title = {MorphoBank: phylophenomics in the "cloud".}, journal = {Cladistics : the international journal of the Willi Hennig Society}, volume = {27}, number = {5}, pages = {529-537}, doi = {10.1111/j.1096-0031.2011.00355.x}, pmid = {34875801}, issn = {1096-0031}, abstract = {A highly interoperable informatics infrastructure rapidly emerged to handle genomic data used for phylogenetics and was instrumental in the growth of molecular systematics. Parallel growth in software and databases to address needs peculiar to phylophenomics has been relatively slow and fragmented. Systematists currently face the challenge that Earth may hold tens of millions of species (living and fossil) to be described and classified. Grappling with research on this scale has increasingly resulted in work by teams, many constructing large phenomic supermatrices. Until now, phylogeneticists have managed data in single-user, file-based desktop software wholly unsuitable for real-time, team-based collaborative work. Furthermore, phenomic data often differ from genomic data in readily lending themselves to media representation (e.g. 2D and 3D images, video, sound). 
Phenomic data are a growing component of phylogenetics, and thus teams require the ability to record homology hypotheses using media and to share and archive these data. Here we describe MorphoBank, a web application and database leveraging a software-as-a-service methodology compatible with "cloud" computing technology for the construction of matrices of phenomic data. In its tenth year, and fully available to the scientific community at large since inception, MorphoBank enables interactive collaboration not possible with desktop software, permitting self-assembling teams to develop matrices, in real time, with linked media in a secure web environment. MorphoBank also provides any user with tools to build character and media ontologies (rule sets) within matrices, and to display these as directed acyclic graphs. These rule sets record the phylogenetic interrelatedness of characters (e.g. if X is absent, Y is inapplicable, or X-Z characters share a media view). MorphoBank has enabled an order of magnitude increase in phylophenomic data collection: a recent collaboration by more than 25 researchers has produced a database of > 4500 phenomic characters supported by > 10 000 media. © The Willi Hennig Society 2011.}, } @article {pmid34409117, year = {2021}, author = {Miao, Y and Hao, Y and Chen, M and Gharavi, H and Hwang, K}, title = {Intelligent Task Caching in Edge Cloud via Bandit Learning.}, journal = {IEEE transactions on network science and engineering}, volume = {8}, number = {1}, pages = {}, doi = {10.1109/tnse.2020.3047417}, pmid = {34409117}, issn = {2327-4697}, support = {9999-NIST/ImNIST/Intramural NIST DOC/United States ; }, abstract = {Task caching, based on the edge cloud, aims to meet the latency requirements of computation-intensive and data-intensive tasks (such as augmented reality). However, current task caching strategies are generally based on the unrealistic assumption of knowing the pattern of user task requests, ignoring the fact that a task request pattern is user specific (e.g., mobility and personalized task demand). Moreover, they disregard the impact of task size and computing amount on the caching strategy. To investigate these issues, in this paper we first formalize the task caching problem as a non-linear integer programming problem to minimize task latency. We then design a novel intelligent task caching algorithm based on a multiarmed bandit algorithm, called M-adaptive upper confidence bound (M-AUCB). The proposed caching strategy can not only learn the task patterns of mobile device requests online, but also dynamically adjust the caching strategy to incorporate the size and computing amount of each task. Moreover, we prove that the M-AUCB algorithm achieves a sublinear regret bound. 
The results show that, compared with other task caching schemes, the M-AUCB algorithm reduces the average task latency by at least 14.8%.}, } @article {pmid34407387, year = {2021}, author = {Fox, CB and Israelsen-Augenstein, M and Jones, S and Gillam, SL}, title = {An Evaluation of Expedited Transcription Methods for School-Age Children's Narrative Language: Automatic Speech Recognition and Real-Time Transcription.}, journal = {Journal of speech, language, and hearing research : JSLHR}, volume = {64}, number = {9}, pages = {3533-3548}, doi = {10.1044/2021_JSLHR-21-00096}, pmid = {34407387}, issn = {1558-9102}, mesh = {Child ; Humans ; Reproducibility of Results ; Schools ; Speech ; *Speech Perception ; *Speech-Language Pathology/education ; }, abstract = {Purpose This study examined the accuracy and potential clinical utility of two expedited transcription methods for narrative language samples elicited from school-age children (7;5-11;10 [years;months]) with developmental language disorder. Transcription methods included real-time transcription produced by speech-language pathologists (SLPs) and trained transcribers (TTs) as well as Google Cloud Speech automatic speech recognition. Method The accuracy of each transcription method was evaluated against a gold-standard reference corpus. Clinical utility was examined by determining the reliability of scores calculated from the transcripts produced by each method on several language sample analysis (LSA) measures. Participants included seven certified SLPs and seven TTs. Each participant was asked to produce a set of six transcripts in real time, out of a total of 42 language samples. The same 42 samples were transcribed using Google Cloud Speech. Transcription accuracy was evaluated through word error rate. Reliability of LSA scores was determined using correlation analysis. Results Results indicated that Google Cloud Speech was significantly more accurate than real-time transcription in transcribing narrative samples and was not impacted by the speech rate of the narrator. In contrast, SLP and TT transcription accuracy decreased as a function of increasing speech rate. LSA metrics generated from Google Cloud Speech transcripts were also more reliably calculated. Conclusions Automatic speech recognition showed greater accuracy and clinical utility as an expedited transcription method than real-time transcription. Though there is room for improvement in the accuracy of speech recognition for the purpose of clinical transcription, it produced highly reliable scores on several commonly used LSA metrics. Supplemental Material https://doi.org/10.23641/asha.15167355.}, } @article {pmid34407145, year = {2021}, author = {Edwards, T and Jones, CB and Perkins, SE and Corcoran, P}, title = {Passive citizen science: The role of social media in wildlife observations.}, journal = {PloS one}, volume = {16}, number = {8}, pages = {e0255416}, pmid = {34407145}, issn = {1932-6203}, mesh = {*Social Media ; *Citizen Science ; Animals ; *Biodiversity ; Animals, Wild ; United Kingdom ; Introduced Species ; }, abstract = {Citizen science plays an important role in observing the natural environment. While conventional citizen science consists of organized campaigns to observe a particular phenomenon or species, there are also many ad hoc observations of the environment in social media. 
These data constitute a valuable resource for 'passive citizen science': the use of social media records that are unconnected to any particular citizen science program but represent an untapped dataset of ecological value. We explore the value of passive citizen science by evaluating species distributions using the photo-sharing site Flickr. The data are evaluated relative to those submitted to the National Biodiversity Network (NBN) Atlas, the largest collection of species distribution data in the UK. Our study focuses on the 1500 best-represented species on NBN and common invasive species within the UK, and compares their spatial and temporal distributions with NBN data. We also introduce an innovative image verification technique that uses the Google Cloud Vision API in combination with species taxonomic data to determine the likelihood that a mention of a species on Flickr represents a given species. The spatial and temporal analyses for our case studies suggest that the Flickr dataset best reflects the NBN dataset when considering a purely spatial distribution with no time constraints. The best-represented species on Flickr in comparison to NBN are diurnal garden birds, as around 70% of the Flickr posts for them are valid observations relative to the NBN. Passive citizen science could offer a rich source of observation data for certain taxonomic groups and/or serve as a repository for dedicated projects. Our novel method of validating Flickr records is suited to verifying more extensive collections, including less well-known species, and when used in combination with citizen science projects could offer a platform for accurate identification of species and their location.}, } @article {pmid34403339, year = {2021}, author = {Lv, C and Lin, W and Zhao, B}, title = {Approximate Intrinsic Voxel Structure for Point Cloud Simplification.}, journal = {IEEE transactions on image processing : a publication of the IEEE Signal Processing Society}, volume = {30}, number = {}, pages = {7241-7255}, doi = {10.1109/TIP.2021.3104174}, pmid = {34403339}, issn = {1941-0042}, abstract = {A point cloud, as an information-intensive 3D representation, usually requires a large amount of transmission, storage and computing resources, which seriously hinders its usage in many emerging fields. In this paper, we propose a novel point cloud simplification method, Approximate Intrinsic Voxel Structure (AIVS), to meet the diverse demands in real-world application scenarios. The method includes point cloud pre-processing (denoising and down-sampling), AIVS-based realization for isotropic simplification, and flexible simplification with intrinsic control of point distance. To demonstrate the effectiveness of the proposed AIVS-based method, we conducted extensive experiments by comparing it with several relevant point cloud simplification methods on three public datasets, including Stanford, SHREC, and RGB-D scene models. The experimental results indicate that AIVS has great advantages over its peers in terms of moving least squares (MLS) surface approximation quality, curvature-sensitive sampling, sharp-feature keeping and processing speed. The source code of the proposed method is publicly available. (https://github.com/vvvwo/AIVS-project).}, } @article {pmid34401476, year = {2021}, author = {Markus, A and Biro, M and Kecskemeti, G and Kertesz, A}, title = {Actuator behaviour modelling in IoT-Fog-Cloud simulation.}, journal = {PeerJ. 
Computer science}, volume = {7}, number = {}, pages = {e651}, pmid = {34401476}, issn = {2376-5992}, abstract = {The inevitable evolution of information technology has led to the creation of IoT-Fog-Cloud systems, which combine the Internet of Things (IoT), Cloud Computing and Fog Computing. IoT systems are composed of possibly billions of smart devices, sensors and actuators connected through the Internet, and these components continuously generate large amounts of data. Cloud and fog services assist the data processing and storage needs of IoT devices. The behaviour of these devices can change dynamically (e.g. properties of data generation or device states). We refer to systems allowing behavioural changes in physical position (i.e. geolocation) as the Internet of Mobile Things (IoMT). The investigation and detailed analysis of such complex systems can be fostered by simulation solutions. The currently available related simulation tools lack a generic actuator model that includes mobility management. In this paper, we present an extension of the DISSECT-CF-Fog simulator to support the analysis of arbitrary actuator events and the mobility capabilities of IoT devices in IoT-Fog-Cloud systems. The main contributions of our work are: (i) a generic actuator model and its implementation in DISSECT-CF-Fog, and (ii) the evaluation of its use through logistics and healthcare scenarios. Our results show that we can successfully model IoMT systems and behavioural changes of actuators in IoT-Fog-Cloud systems in general, and analyse their management issues in terms of usage cost and execution time.}, } @article {pmid34401472, year = {2021}, author = {M, VK and Venkatachalam, K and P, P and Almutairi, A and Abouhawwash, M}, title = {Secure biometric authentication with de-duplication on distributed cloud storage.}, journal = {PeerJ. Computer science}, volume = {7}, number = {}, pages = {e569}, pmid = {34401472}, issn = {2376-5992}, abstract = {Cloud computing is an evolving field of technology that allows data and programs to be stored, accessed, and executed over the internet while offering a variety of information-related services. With cloud information services, it is essential for information to be saved securely and to be distributed safely across numerous users. Cloud information storage has suffered from issues related to information integrity, data security, and information access by unauthenticated users. The distribution and storage of data among several users are highly scalable and cost-efficient but result in data redundancy and security issues. In this article, a biometric authentication scheme is proposed to grant access permission to requesting users in a distributed cloud environment and, at the same time, alleviate data redundancy. To achieve this, a cryptographic technique is used by service providers to generate the bio-key for authentication, which will be accessible only to authenticated users. A Gabor filter with distributed security and encryption using XOR operations is used to generate the proposed bio-key (biometric-generated key) and avoid data duplication in the cloud, ensuring data security while eliminating redundancy. The proposed method is compared with existing algorithms, such as convergent encryption (CE), leakage-resilient (LR) encryption, randomized convergent encryption (RCE), and the secure de-duplication scheme (SDS), to evaluate the de-duplication performance. 
Our comparative analysis shows that our proposed scheme achieves lower computation and communication costs than existing schemes.}, } @article {pmid34398234, year = {2021}, author = {Bloom, JD}, title = {Recovery of Deleted Deep Sequencing Data Sheds More Light on the Early Wuhan SARS-CoV-2 Epidemic.}, journal = {Molecular biology and evolution}, volume = {38}, number = {12}, pages = {5211-5224}, pmid = {34398234}, issn = {1537-1719}, support = {S10 OD028685/OD/NIH HHS/United States ; S10OD028685/GF/NIH HHS/United States ; /HHMI/Howard Hughes Medical Institute/United States ; }, mesh = {*COVID-19/virology ; China ; *High-Throughput Nucleotide Sequencing ; Humans ; Phylogeny ; *SARS-CoV-2/genetics ; }, abstract = {The origin and early spread of SARS-CoV-2 remains shrouded in mystery. Here, I identify a data set containing SARS-CoV-2 sequences from early in the Wuhan epidemic that has been deleted from the NIH's Sequence Read Archive. I recover the deleted files from the Google Cloud and reconstruct partial sequences of 13 early epidemic viruses. Phylogenetic analysis of these sequences in the context of carefully annotated existing data further supports the idea that the Huanan Seafood Market sequences are not fully representative of the viruses in Wuhan early in the epidemic. Instead, the progenitor of currently known SARS-CoV-2 sequences likely contained three mutations relative to the market viruses that made it more similar to SARS-CoV-2's bat coronavirus relatives.}, } @article {pmid34395534, year = {2021}, author = {Honorato, RV and Koukos, PI and Jiménez-García, B and Tsaregorodtsev, A and Verlato, M and Giachetti, A and Rosato, A and Bonvin, AMJJ}, title = {Structural Biology in the Clouds: The WeNMR-EOSC Ecosystem.}, journal = {Frontiers in molecular biosciences}, volume = {8}, number = {}, pages = {729513}, pmid = {34395534}, issn = {2296-889X}, abstract = {Structural biology aims at characterizing the structural and dynamic properties of biological macromolecules at atomic detail. Gaining insight into the three-dimensional structures of biomolecules and their interactions is critical for understanding the vast majority of cellular processes, with direct applications in health and food sciences. Since 2010, the WeNMR project (www.wenmr.eu) has implemented numerous web-based services to facilitate the use of advanced computational tools by researchers in the field, using the high-throughput computing infrastructure provided by EGI. These services have been further developed in subsequent initiatives under H2020 projects and are now operating as Thematic Services in the European Open Science Cloud portal (www.eosc-portal.eu), sending >12 million jobs and using around 4,000 CPU-years per year. Here we review 10 years of successful e-infrastructure solutions serving a large worldwide community of over 23,000 users to date, providing them with user-friendly, web-based solutions that run complex workflows in structural biology. 
The current set of active WeNMR portals is described, together with the complex backend machinery that allows distributed computing resources to be harvested efficiently.}, } @article {pmid34393952, year = {2021}, author = {Aguirre Montero, A and López-Sánchez, JA}, title = {Intersection of Data Science and Smart Destinations: A Systematic Review.}, journal = {Frontiers in psychology}, volume = {12}, number = {}, pages = {712610}, pmid = {34393952}, issn = {1664-1078}, abstract = {This systematic review adopts a formal and structured approach to review the intersection of data science and smart tourism destinations in terms of components found in previous research. The study period corresponds to 1995-2021, with the analysis focusing mainly on recent years (2015-2021) to identify and characterize current trends in this research topic. The review comprises documentary research based on bibliometric and conceptual analysis, using the VOSviewer and SciMAT software to analyze articles from the Web of Science database. There is growing interest in this research topic, with more than 300 articles published annually. Data science technologies on which current smart destinations research is based include big data, smart data, data analytics, social media, cloud computing, the internet of things (IoT), smart card data, geographic information system (GIS) technologies, open data, artificial intelligence, and machine learning. Critical research areas for data science techniques and technologies in smart destinations are public tourism marketing, mobility-accessibility, and sustainability. Data analysis techniques and technologies face unprecedented challenges and opportunities post-coronavirus disease-2019 (COVID-19) to build on the huge amount of data and a new tourism model that is more sustainable, smarter, and safer than those previously implemented.}, } @article {pmid34393357, year = {2020}, author = {Nour, B and Mastorakis, S and Mtibaa, A}, title = {Compute-Less Networking: Perspectives, Challenges, and Opportunities.}, journal = {IEEE network}, volume = {34}, number = {6}, pages = {259-265}, doi = {10.1109/mnet.011.2000180}, pmid = {34393357}, issn = {0890-8044}, support = {P20 GM109090/GM/NIGMS NIH HHS/United States ; }, abstract = {Delay-sensitive applications have been driving the move away from cloud computing, which cannot meet their low-latency requirements. Edge computing and programmable switches have been among the first steps toward pushing computation closer to end-users in order to reduce cost, latency, and overall resource utilization. This article presents the "compute-less" paradigm, which builds on top of the well-known edge computing paradigm through a set of communication and computation optimization mechanisms (e.g., in-network computing, task clustering and aggregation, computation reuse). The main objective of the compute-less paradigm is to reduce the migration of computation and the usage of network and computing resources, while maintaining high Quality of Experience for end-users. 
We discuss the new perspectives, challenges, limitations, and opportunities of this compute-less paradigm.}, } @article {pmid34389135, year = {2021}, author = {Szamosfalvi, B and Heung, M and Yessayan, L}, title = {Technology Innovations in Continuous Kidney Replacement Therapy: The Clinician's Perspective.}, journal = {Advances in chronic kidney disease}, volume = {28}, number = {1}, pages = {3-12}, doi = {10.1053/j.ackd.2021.03.021}, pmid = {34389135}, issn = {1548-5609}, mesh = {*Acute Kidney Injury ; Anticoagulants ; *Continuous Renal Replacement Therapy ; Humans ; Intensive Care Units ; Renal Dialysis ; Technology ; }, abstract = {Continuous kidney replacement therapy (CKRT) has improved remarkably since its first implementation as continuous arteriovenous hemofiltration in the 1970s. However, when looking at the latest generation of CKRT machines, one could argue that clinical deployment of breakthrough innovations by device manufacturers has slowed in the last decade. Simultaneously, there has been a steady accumulation of clinical knowledge using CKRT as well as a multitude of therapeutic and diagnostic innovations in the dialysis and broader intensive care unit technology fields adaptable to CKRT. These include multiple different anticlotting measures; cloud-computing for optimized treatment prescribing and delivered therapy data collection and analysis; novel blood purification techniques aimed at improving the severe multiorgan dysfunction syndrome; and real-time sensing of blood and/or filter effluent composition. The authors present a view of how CKRT devices and programs could be reimagined incorporating these innovations to achieve specific measurable clinical outcomes with personalized care and improved simplicity, safety, and efficacy of CKRT therapy.}, } @article {pmid34383582, year = {2021}, author = {Ronquillo, JG and Lester, WT}, title = {Practical Aspects of Implementing and Applying Health Care Cloud Computing Services and Informatics to Cancer Clinical Trial Data.}, journal = {JCO clinical cancer informatics}, volume = {5}, number = {}, pages = {826-832}, pmid = {34383582}, issn = {2473-4276}, mesh = {*Cloud Computing ; Delivery of Health Care ; Ecosystem ; Humans ; Informatics ; *Neoplasms/diagnosis/epidemiology/therapy ; Precision Medicine ; }, abstract = {PURPOSE: Cloud computing has led to dramatic growth in the volume, variety, and velocity of cancer data. However, cloud platforms and services present new challenges for cancer research, particularly in understanding the practical tradeoffs between cloud performance, cost, and complexity. The goal of this study was to describe the practical challenges when using a cloud-based service to improve the cancer clinical trial matching process.

METHODS: We collected information for all interventional cancer clinical trials from ClinicalTrials.gov and used the Google Cloud Healthcare Natural Language Application Programming Interface (API) to analyze clinical trial Title and Eligibility Criteria text. An informatics pipeline leveraging interoperability standards summarized the distribution of cancer clinical trials, genes, laboratory tests, and medications extracted from cloud-based entity analysis.
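
As an illustration of the kind of cloud-based entity analysis described above, the sketch below shows one plausible way to submit a snippet of eligibility-criteria text to the Cloud Healthcare Natural Language API; the endpoint shape follows Google's v1 documentation, while the project ID, location, and sample text are hypothetical placeholders rather than details taken from the study.

```python
# Hedged sketch of entity analysis on trial eligibility text.
# Assumptions: the Cloud Healthcare Natural Language API v1
# analyzeEntities endpoint; "my-project", "us-central1", and the
# sample criteria are placeholders, not values from the study.
from collections import Counter

import google.auth
from google.auth.transport.requests import AuthorizedSession

project, location = "my-project", "us-central1"  # placeholders
url = (f"https://healthcare.googleapis.com/v1/projects/{project}"
       f"/locations/{location}/services/nlp:analyzeEntities")

credentials, _ = google.auth.default(
    scopes=["https://www.googleapis.com/auth/cloud-platform"])
session = AuthorizedSession(credentials)

criteria = "Inclusion: EGFR-mutant NSCLC; prior platinum-based chemotherapy."
response = session.post(url, json={"documentContent": criteria})
response.raise_for_status()

# Tally the mention types (e.g., genes, medications, laboratory tests)
# returned for this record, mirroring the distributions summarized above.
mentions = response.json().get("entityMentions", [])
print(Counter(m.get("type") for m in mentions))
```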

RESULTS: There were a total of 38,851 cancer-related clinical trials found in this study, with the distribution of cancer categories extracted from Title text significantly different than in ClinicalTrials.gov (P < .001). Cloud-based entity analysis of clinical trial criteria identified a total of 949 genes, 1,782 laboratory tests, 2,086 medications, and 4,902 National Cancer Institute Thesaurus terms, with estimated detection accuracies ranging from 12.8% to 89.9%. A total of 77,702 API calls processed an estimated 167,179 text records, which took a total of 1,979 processing-minutes (33.0 processing-hours), or approximately 1.5 seconds per API call.
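
The reported throughput figures are internally consistent, as a quick back-of-the-envelope check confirms:

```python
# Quick arithmetic check of the throughput figures quoted above.
calls, minutes = 77_702, 1_979
print(f"{minutes / 60:.1f} processing-hours")        # ~33.0
print(f"{minutes * 60 / calls:.2f} s per API call")  # ~1.53
```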

CONCLUSION: Current general-purpose cloud health care tools, like the Google service in this study, should not be used for automated clinical trial matching unless they can perform effective extraction and classification of the clinical, genetic, and medication concepts central to precision oncology research. A strong understanding of the practical aspects of cloud computing will help researchers effectively navigate the vast data ecosystems in cancer research.}, } @article {pmid34380380, year = {2021}, author = {Paul, G and Abele, ND and Kluth, K}, title = {A Review and Qualitative Meta-Analysis of Digital Human Modeling and Cyber-Physical-Systems in Ergonomics 4.0.}, journal = {IISE transactions on occupational ergonomics and human factors}, volume = {9}, number = {3-4}, pages = {111-123}, pmid = {34380380}, issn = {2472-5846}, mesh = {*Ergonomics ; Humans ; *Industry ; }, abstract = {Occupational Applications: Founded on an empirical case study and theoretical work, this paper reviews the scientific literature to define the role of Digital Human Modeling (DHM), Digital Twin (DT), and Cyber-Physical Systems (CPS) to inform the emerging concept of Ergonomics 4.0. We find that DHM, as it evolves into DT, is a core element of Ergonomics 4.0. A solid understanding of and agreement on the nature of Ergonomics 4.0 is essential for the inclusion of ergonomic values and considerations in the larger conceptual framework of Industry 4.0. In this context, we invite ergonomists from various disciplines to broaden their understanding and application of DHM and DT.}, } @article {pmid34376975, year = {2021}, author = {Koppad, S and B, A and Gkoutos, GV and Acharjee, A}, title = {Cloud Computing Enabled Big Multi-Omics Data Analytics.}, journal = {Bioinformatics and biology insights}, volume = {15}, number = {}, pages = {11779322211035921}, pmid = {34376975}, issn = {1177-9322}, abstract = {High-throughput experiments enable researchers to explore complex multifactorial diseases through large-scale analysis of omics data. Challenges for such high-dimensional data sets include storage, analyses, and sharing. Recent innovations in computational technologies and approaches, especially in cloud computing, offer a promising, low-cost, and highly flexible solution in the bioinformatics domain. Cloud computing is rapidly proving increasingly useful in molecular modeling, omics data analytics (eg, RNA sequencing, metabolomics, or proteomics data sets), and for the integration, analysis, and interpretation of phenotypic data. 
We review the adoption of advanced cloud-based and big data technologies for processing and analyzing omics data and provide insights into state-of-the-art cloud bioinformatics applications.}, } @article {pmid34372809, year = {2021}, author = {Chaudhuri, S and Han, H and Monaghan, C and Larkin, J and Waguespack, P and Shulman, B and Kuang, Z and Bellamkonda, S and Brzozowski, J and Hymes, J and Black, M and Kotanko, P and Kooman, JP and Maddux, FW and Usvyat, L}, title = {Real-time prediction of intradialytic relative blood volume: a proof-of-concept for integrated cloud computing infrastructure.}, journal = {BMC nephrology}, volume = {22}, number = {1}, pages = {274}, pmid = {34372809}, issn = {1471-2369}, mesh = {Blood Volume/*physiology ; *Body Fluid Compartments ; Cloud Computing ; Early Diagnosis ; Female ; Humans ; *Hypotension/diagnosis/etiology/prevention & control ; *Kidney Failure, Chronic/physiopathology/therapy ; *Machine Learning ; Male ; Middle Aged ; *Muscle Cramp/diagnosis/etiology/prevention & control ; Prognosis ; Proof of Concept Study ; *Renal Dialysis/adverse effects/methods ; *Vomiting/diagnosis/etiology/prevention & control ; }, abstract = {BACKGROUND: Inadequate refilling from extravascular compartments during hemodialysis can lead to intradialytic symptoms, such as hypotension, nausea, vomiting, and cramping/myalgia. Relative blood volume (RBV) plays an important role in adapting the ultrafiltration rate, which in turn has a positive effect on intradialytic symptoms. It has been clinically challenging to identify changes in RBV in real time so as to proactively intervene and reduce the potential negative consequences of volume depletion. Leveraging advanced technologies to process large volumes of dialysis and machine data in real time and developing prediction models using machine learning (ML) is critical to identifying these signals.

METHOD: We conducted a proof-of-concept analysis to retrospectively assess near real-time dialysis treatment data from in-center patients in six clinics, collected using an Optical Sensing Device (OSD) from December 2018 to August 2019. The goal of this analysis was to use real-time OSD data to predict whether a patient's relative blood volume (RBV) would decrease at a rate of at least 6.5% per hour within the next 15 min of a dialysis treatment, based on 10-second windows of data in the previous 15 min. A dashboard application was constructed to demonstrate how reporting structures may be developed to alert clinicians in real time to at-risk cases. Data were derived from three sources: (1) OSDs, (2) hemodialysis machines, and (3) patient electronic health records.
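
Purely as an illustration, the sketch below casts the windowing and labelling scheme described above as a supervised learning problem; the rbv series name, the features, and the gradient-boosting classifier are assumptions made for the example, not the authors' actual pipeline.

```python
# Illustrative sketch only: the windowing and labelling scheme from the
# METHOD paragraph, cast as a supervised learning problem. The RBV series
# layout, the features, and the classifier choice are all assumptions.
import pandas as pd
from sklearn.ensemble import GradientBoostingClassifier

WINDOW = pd.Timedelta(minutes=15)

def make_example(rbv: pd.Series, t: pd.Timestamp):
    """rbv: one treatment's RBV (%) sampled every 10 s on a DatetimeIndex."""
    past, future = rbv.loc[t - WINDOW:t], rbv.loc[t:t + WINDOW]
    features = [past.mean(), past.std(), past.iloc[-1] - past.iloc[0]]
    # Label: does RBV fall at >= 6.5% per hour over the next 15 minutes?
    slope_per_hour = (future.iloc[-1] - future.iloc[0]) * 4  # 15 min -> 1 h
    return features, int(slope_per_hour <= -6.5)

# After sliding make_example over many treatments to build X and y:
# model = GradientBoostingClassifier().fit(X_train, y_train)
# flag = model.predict_proba(X_live)[:, 1] >= 0.08  # threshold from RESULTS
```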

RESULTS: Treatment data from 616 in-center dialysis patients in the six clinics was curated into a big data store and fed into a Machine Learning (ML) model developed and deployed within the cloud. The threshold for classifying observations as positive or negative was set at 0.08. Precision for the model at this threshold was 0.33 and recall was 0.94. The area under the receiver operating curve (AUROC) for the ML model was 0.89 using test data.

CONCLUSIONS: The findings from our proof-of-concept analysis demonstrate the design of a cloud-based framework that can be used for making real-time predictions of events during dialysis treatments. Making real-time predictions has the potential to assist clinicians at the point of care during hemodialysis.}, } @article {pmid34372471, year = {2021}, author = {Ismail, L and Materwala, H}, title = {ESCOVE: Energy-SLA-Aware Edge-Cloud Computation Offloading in Vehicular Networks.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {15}, pages = {}, pmid = {34372471}, issn = {1424-8220}, support = {31R215//National Water and Energy Center, United Arab Emirates University, United Arab Emirates/ ; }, abstract = {The vehicular network is an emerging technology in the Intelligent Smart Transportation era. The network provides mechanisms for running different applications, such as accident prevention, publishing and consuming services, and traffic flow management. In such scenarios, edge and cloud computing come into the picture to offload computation from vehicles that have limited processing capabilities. Optimizing the energy consumption of the edge and cloud servers becomes crucial. However, existing research efforts focus on either vehicle or edge energy optimization and do not account for vehicular applications' quality of service. In this paper, we address this void by proposing a novel offloading algorithm, ESCOVE, which optimizes the energy of the edge-cloud computing platform. The proposed algorithm respects the service-level agreement (SLA) in terms of latency, processing and total execution times. The experimental results show that ESCOVE is a promising approach for energy savings while preserving SLAs compared to the state-of-the-art approach.}, } @article {pmid34370407, year = {2021}, author = {Gahm, NA and Rueden, CT and Evans, EL and Selzer, G and Hiner, MC and Chacko, JV and Gao, D and Sherer, NM and Eliceiri, KW}, title = {New Extensibility and Scripting Tools in the ImageJ Ecosystem.}, journal = {Current protocols}, volume = {1}, number = {8}, pages = {e204}, pmid = {34370407}, issn = {2691-1299}, support = {P41 GM135019/GM/NIGMS NIH HHS/United States ; T15 LM007359/LM/NLM NIH HHS/United States ; T32 CA009135/CA/NCI NIH HHS/United States ; }, mesh = {Algorithms ; *Ecosystem ; Humans ; *Image Processing, Computer-Assisted ; Microscopy, Fluorescence ; Software ; }, abstract = {ImageJ provides a framework for image processing across scientific domains while being fully open source. Over the years ImageJ has been substantially extended to support novel applications in scientific imaging as they emerge, particularly in the area of biological microscopy, with functionality made more accessible via the Fiji distribution of ImageJ. Within this software ecosystem, work has been done to extend the accessibility of ImageJ to utilize scripting, macros, and plugins in a variety of programming scenarios, e.g., from Groovy and Python and in Jupyter notebooks and cloud computing. We provide five protocols that demonstrate the extensibility of ImageJ for various workflows in image processing. We focus first on Fluorescence Lifetime Imaging Microscopy (FLIM) data, since this requires significant processing to provide quantitative insights into the microenvironments of cells. 
Second, we show how ImageJ can now be utilized for common image processing techniques, specifically image deconvolution and inversion, while highlighting the new, built-in features of ImageJ, particularly its capacity to run completely headless and the Ops matching feature that selects the optimal algorithm for a given function and data input, thereby enabling processing speedup. Collectively, these protocols can be used as a basis for automating biological image processing workflows. © 2021 Wiley Periodicals LLC. Basic Protocol 1: Using PyImageJ for FLIM data processing Alternate Protocol: Groovy FLIMJ in Jupyter Notebooks Basic Protocol 2: Using ImageJ Ops for image deconvolution Support Protocol 1: Using ImageJ Ops matching feature for image inversion Support Protocol 2: Headless ImageJ deconvolution.}, } @article {pmid34366566, year = {2022}, author = {Su, P and Chen, Y and Lu, M}, title = {Smart city information processing under internet of things and cloud computing.}, journal = {The Journal of supercomputing}, volume = {78}, number = {3}, pages = {3676-3695}, pmid = {34366566}, issn = {0920-8542}, abstract = {This study explores smart city information (SCI) processing technology based on the Internet of Things (IoT) and cloud computing, promoting the construction of smart cities toward effective sharing and interconnection. In this study, an SCI system is constructed to address the information islands that arise across the various fields of smart city construction. The smart environment monitoring, smart transportation, and smart epidemic prevention functions at the application layer of the SCI system are designed separately. A multi-objective optimization algorithm for the cloud computing virtual machine resource allocation (CC-VMRA) method is proposed, and the application of IoT and cloud computing technology in the SCI system is further analysed and simulated for performance verification. The results show that the multi-objective optimization algorithm in the CC-VMRA method can greatly reduce the number of physical servers in the SCI system (fewer than 20), with a variance no higher than 0.0024, enabling the server cluster to achieve better load balancing. In addition, the packet loss rate of the Zigbee protocol used by the IoT gateway in the SCI system is far below the 0.1% target, and the delay is less than 10 ms. Therefore, the SCI system constructed in this study shows low latency and high utilization, and can provide an experimental reference for the later construction of smart cities.}, } @article {pmid34359654, year = {2021}, author = {Prakash, A and Mahoney, KE and Orsburn, BC}, title = {Cloud Computing Based Immunopeptidomics Utilizing Community Curated Variant Libraries Simplifies and Improves Neo-Antigen Discovery in Metastatic Melanoma.}, journal = {Cancers}, volume = {13}, number = {15}, pages = {}, pmid = {34359654}, issn = {2072-6694}, abstract = {Unique peptide neo-antigens presented on the cell surface are attractive targets for researchers in nearly all areas of personalized medicine. Cells presenting peptides with mutated or other non-canonical sequences can be utilized for both targeted therapies and diagnostics. Today's state-of-the-art pipelines utilize complementary proteogenomic approaches where RNA or ribosomal sequencing data help to create libraries against which tandem mass spectrometry data can be compared. 
In this study, we present an alternative approach whereby cloud computing is utilized to power neo-antigen searches against community-curated databases containing more than 7 million human sequence variants. Using these expansive databases of high-quality sequences as a reference, we reanalyze the original data from two previously reported studies to identify neo-antigen targets in metastatic melanoma. Using our approach, we identify 79 percent of the non-canonical peptides reported by previous genomic analyses of these files. Furthermore, we report 18-fold more non-canonical peptides than previously reported. The novel neo-antigens we report herein can be corroborated by secondary analyses, such as high predicted binding affinity when analyzed by well-established tools such as NetMHC. Finally, we report 738 non-canonical peptides shared by at least five patient samples, and 3258 shared across the two studies. This illustrates the depth of data that is present but typically missed by lower-statistical-power proteogenomic approaches. This large list of peptides shared across the two studies, together with their annotations, their non-canonical origins, and the MS/MS spectra from the two studies, is made available on a web portal for community analysis.}, } @article {pmid34345198, year = {2022}, author = {Narayanan, KL and Krishnan, RS and Son, LH and Tung, NT and Julie, EG and Robinson, YH and Kumar, R and Gerogiannis, VC}, title = {Fuzzy Guided Autonomous Nursing Robot through Wireless Beacon Network.}, journal = {Multimedia tools and applications}, volume = {81}, number = {3}, pages = {3297-3325}, pmid = {34345198}, issn = {1380-7501}, abstract = {Robotics is one of the most rapidly emerging technologies today, and robots are used in a variety of applications, ranging from complex rocket technology to the monitoring of crops in agriculture. Robots can be exceptionally useful in a smart hospital environment provided that they are equipped with improved vision capabilities for detecting and avoiding obstacles in their path, allowing them to perform their tasks without disturbance. In the particular case of Autonomous Nursing Robots, the major issues are effective robot path planning for the delivery of medicines to patients, measuring patient body parameters through sensors, and interacting with and informing the patient, by means of voice-based modules, about the doctor's visiting schedule, his/her body parameter details, etc. This paper presents a complete Autonomous Nursing Robot approach that supports all the aforementioned tasks. In this paper, we present a new Autonomous Nursing Robot system capable of operating in a smart hospital environment. The objective of the system is to identify the patient room, perform robot path planning for the delivery of medicines to a patient, and measure the patient's body parameters, through a wireless BLE (Bluetooth Low Energy) beacon receiver and BLE beacon transmitters in the respective patient rooms. Assuming that a wireless beacon is kept in the patient's room, the robot follows the beacon's signal, identifies the respective room and delivers the needed medicine to the patient. A new fuzzy controller system, which consists of three ultrasonic sensors and one camera, is developed to detect the optimal robot path and to avoid robot collisions with stationary and moving obstacles. The fuzzy controller effectively detects obstacles in the robot's vicinity and makes proper decisions for avoiding them. 
The navigation of the robot is implemented on a BLE tag module using the AOA (Angle of Arrival) method. The robot uses sensors to measure the patient's body parameters and updates these data to the hospital patient database system in a private cloud mode. It also makes use of Google Assistant to interact with the patients. The robotic system was implemented on a Raspberry Pi using Matlab 2018b. The system performance was evaluated on a PC with an Intel Core i5 processor, while solar power was used to power the system. Several sensors, namely an HC-SR04 ultrasonic sensor, a Logitech HD 720p image sensor, a temperature sensor and a heart rate sensor, are used together with a camera to generate datasets for testing the proposed system. In particular, the system was tested on operations taking place in the context of a private hospital in Tirunelveli, Tamilnadu, India. A detailed comparison is performed, using performance metrics such as correlation, Root Mean Square Error (RMSE), and Mean Absolute Percentage Error (MAPE), against the related works of Deepu et al., Huh and Seo, Chinmayi et al., Alli et al., Xu, Ran et al., and Lee et al. The experimental system validation showed that the fuzzy controller achieves very high accuracy in obstacle detection and avoidance, with a very low computational time for making directional decisions. Moreover, the experimental results demonstrated that the robotic system achieves superior accuracy in detecting/avoiding obstacles compared to other systems of similar purpose presented in the related works.}, } @article {pmid34344669, year = {2023}, author = {Antaki, F and Coussa, RG and Kahwati, G and Hammamji, K and Sebag, M and Duval, R}, title = {Accuracy of automated machine learning in classifying retinal pathologies from ultra-widefield pseudocolour fundus images.}, journal = {The British journal of ophthalmology}, volume = {107}, number = {1}, pages = {90-95}, doi = {10.1136/bjophthalmol-2021-319030}, pmid = {34344669}, issn = {1468-2079}, mesh = {Humans ; *Artificial Intelligence ; ROC Curve ; Fundus Oculi ; Machine Learning ; Retina ; *Retinal Vein Occlusion ; }, abstract = {AIMS: Automated machine learning (AutoML) is a novel tool in artificial intelligence (AI). This study assessed the discriminative performance of AutoML in differentiating retinal vein occlusion (RVO), retinitis pigmentosa (RP) and retinal detachment (RD) from normal fundi using ultra-widefield (UWF) pseudocolour fundus images.

METHODS: Two ophthalmologists without coding experience carried out AutoML model design using a publicly available image data set (2137 labelled images). The data set was reviewed for low-quality and mislabelled images and then uploaded to the Google Cloud AutoML Vision platform for training and testing. We designed multiple binary models to differentiate RVO, RP and RD from normal fundi and compared them to bespoke models obtained from the literature. We then devised a multiclass model to detect RVO, RP and RD. Saliency maps were generated to assess the interpretability of the model.
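As a side note on the headline metric of this entry, the sketch below shows how an area under the precision-recall curve is commonly computed with scikit-learn; the labels and scores are made up, and this is independent of the AutoML platform the authors used.

```python
# Sketch: AUPRC for a binary fundus classifier; data below is invented.
from sklearn.metrics import average_precision_score

y_true = [1, 0, 1, 1, 0, 0, 1, 0]  # 1 = disease (e.g., RVO), 0 = normal fundus
y_score = [0.92, 0.10, 0.85, 0.60, 0.30, 0.45, 0.75, 0.20]  # model confidence

# Average precision is the standard summary of the precision-recall curve.
print(f"AUPRC: {average_precision_score(y_true, y_score):.3f}")
```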

RESULTS: The AutoML models demonstrated high diagnostic properties in the binary classification tasks that were generally comparable to bespoke deep-learning models (area under the precision-recall curve (AUPRC) 0.921-1, sensitivity 84.91%-89.77%, specificity 78.72%-100%). The multiclass AutoML model had an AUPRC of 0.876, a sensitivity of 77.93% and a positive predictive value of 82.59%. The per-label sensitivity and specificity, respectively, were normal fundi (91.49%, 86.75%), RVO (83.02%, 92.50%), RP (72.00%, 100%) and RD (79.55%, 96.80%).
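The per-label sensitivities and specificities quoted above can be derived one-vs-rest from a multiclass confusion matrix; a hedged sketch with toy labels follows (not the study's data).

```python
# Sketch: per-label sensitivity/specificity from a confusion matrix (toy data).
from sklearn.metrics import confusion_matrix

labels = ["normal", "RVO", "RP", "RD"]
y_true = ["normal", "RVO", "RP", "RD", "normal", "RVO", "RD", "RP"]
y_pred = ["normal", "RVO", "RP", "RD", "RVO", "RVO", "RD", "normal"]

cm = confusion_matrix(y_true, y_pred, labels=labels)
total = cm.sum()
for i, name in enumerate(labels):
    tp = cm[i, i]
    fn = cm[i, :].sum() - tp   # missed cases of this label
    fp = cm[:, i].sum() - tp   # other labels predicted as this one
    tn = total - tp - fn - fp
    print(f"{name}: sensitivity={tp / (tp + fn):.2f}, specificity={tn / (tn + fp):.2f}")
```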

CONCLUSION: AutoML models created by ophthalmologists without coding experience can detect RVO, RP and RD in UWF images with very good diagnostic accuracy. The performance was comparable to bespoke deep-learning models derived by AI experts for RVO and RP but not for RD.}, } @article {pmid34343101, year = {2022}, author = {Tajalli, SZ and Kavousi-Fard, A and Mardaneh, M and Khosravi, A and Razavi-Far, R}, title = {Uncertainty-Aware Management of Smart Grids Using Cloud-Based LSTM-Prediction Interval.}, journal = {IEEE transactions on cybernetics}, volume = {52}, number = {10}, pages = {9964-9977}, doi = {10.1109/TCYB.2021.3089634}, pmid = {34343101}, issn = {2168-2275}, abstract = {This article introduces an uncertainty-aware cloud-fog-based framework for power management of smart grids using a multiagent-based system. Power management is a social welfare optimization problem. A multiagent-based algorithm is suggested to solve this problem, in which agents are defined as volunteering consumers and dispatchable generators. In the proposed method, every consumer can voluntarily put a price on its power demand at each interval of operation to benefit from the equal opportunity of contributing to the power management process provided for all generation and consumption units. In addition, the uncertainty analysis using a deep learning method is also applied in a distributed way, with the local calculation of prediction intervals for sources of a stochastic nature in the system, such as loads, small wind turbines (WTs), and rooftop photovoltaics (PVs). Using the predicted ranges of load demand and stochastic generation outputs, a range for power consumption/generation is also provided for each agent, called the "preparation range," to demonstrate the predicted boundary within which the accepted power consumption/generation of an agent might occur, considering the uncertain sources. Besides, fog computing is deployed as a critical infrastructure for fast calculation and for providing local storage for reasonable pricing. Cloud services are also proposed for virtual applications as efficient databases and computation units. The performance of the proposed framework is examined on two smart grid test systems and compared with other well-known methods. The results prove the capability of the proposed method to obtain the optimal outcomes in a short time for any scale of grid.}, } @article {pmid34342466, year = {2021}, author = {Marques, G and Leswing, K and Robertson, T and Giesen, D and Halls, MD and Goldberg, A and Marshall, K and Staker, J and Morisato, T and Maeshima, H and Arai, H and Sasago, M and Fujii, E and Matsuzawa, NN}, title = {De Novo Design of Molecules with Low Hole Reorganization Energy Based on a Quarter-Million Molecule DFT Screen.}, journal = {The journal of physical chemistry. A}, volume = {125}, number = {33}, pages = {7331-7343}, doi = {10.1021/acs.jpca.1c04587}, pmid = {34342466}, issn = {1520-5215}, abstract = {Materials exhibiting higher mobilities than conventional organic semiconducting materials such as fullerenes and fused thiophenes are in high demand for applications in printed electronics. To discover new molecules in the heteroacene family that might show improved hole mobility, three de novo design methods were applied.
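For the uncertainty-aware smart-grid entry above, the usual way to check a prediction interval is its empirical coverage and width; the sketch below does this for synthetic demand data with an invented interval half-width, standing in for the locally computed LSTM prediction intervals.

```python
# Sketch: evaluating prediction intervals on synthetic demand data.
import numpy as np

rng = np.random.default_rng(0)
actual = rng.normal(100.0, 10.0, size=200)          # observed demand (kW), synthetic
forecast = actual + rng.normal(0.0, 8.0, size=200)  # point forecast with error
lower, upper = forecast - 12.0, forecast + 12.0     # invented interval half-width

picp = np.mean((actual >= lower) & (actual <= upper))           # coverage probability
pinaw = np.mean(upper - lower) / (actual.max() - actual.min())  # normalized width
print(f"coverage={picp:.1%}, normalized width={pinaw:.2f}")
```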
Machine learning (ML) models were generated based on previously calculated hole reorganization energies of a quarter million examples of heteroacenes, where the energies were calculated by applying density functional theory (DFT) and a massive cloud computing environment. The three generative methods applied were (1) the continuous space method, where molecular structures are converted into continuous variables by applying the variational autoencoder/decoder technique; (2) the method based on reinforcement learning of SMILES strings (the REINVENT method); and (3) the junction tree variational autoencoder method that directly generates molecular graphs. Among the three methods, the second and third succeeded in obtaining chemical structures whose DFT-calculated hole reorganization energy was lower than the lowest energy in the training dataset. This suggests that an extrapolative materials design protocol can be developed by applying generative modeling to a quantitative structure-property relationship (QSPR) utility function.}, } @article {pmid34335730, year = {2021}, author = {Du, Z and Miao, H}, title = {Research on Edge Service Composition Method Based on BAS Algorithm.}, journal = {Computational intelligence and neuroscience}, volume = {2021}, number = {}, pages = {9931689}, pmid = {34335730}, issn = {1687-5273}, mesh = {*Algorithms ; *Cloud Computing ; Computers ; Heuristics ; }, abstract = {Edge services move data processing, application execution, and the implementation of some functional services from central cloud servers to network edge servers. Composed edge services can effectively reduce computation in the cloud, shorten the distance over which data travel for processing, quickly decompose service-request tasks, and select the optimal combination of edge services for users. BAS is an efficient intelligent optimization algorithm that requires neither the specific form of the objective function nor gradient information. This paper designs an edge service composition model based on edge computing and proposes an edge service composition method based on the BAS optimization algorithm. Our proposed method has obvious advantages in service composition efficiency compared with composition methods based on the PSO and WPA heuristic algorithms. Compared with cloud-based service composition, it offers shorter service response times, lower cost, and a higher quality of user experience.}, } @article {pmid34328586, year = {2021}, author = {Wang, Y and Murlidaran, S and Pearlman, DA}, title = {Quantum simulations of SARS-CoV-2 main protease M[pro] enable high-quality scoring of diverse ligands.}, journal = {Journal of computer-aided molecular design}, volume = {35}, number = {9}, pages = {963-971}, pmid = {34328586}, issn = {1573-4951}, support = {R43 GM140578/GM/NIGMS NIH HHS/United States ; }, mesh = {Antiviral Agents/*chemistry/metabolism ; Atazanavir Sulfate/chemistry/metabolism ; Binding Sites ; Cloud Computing ; Coronavirus 3C Proteases/*chemistry/*metabolism ; Density Functional Theory ; Hydrogen Bonding ; Ligands ; Molecular Docking Simulation ; Protein Conformation ; Quantum Theory ; }, abstract = {The COVID-19 pandemic has led to unprecedented efforts to identify drugs that can reduce its associated morbidity/mortality rate. Computational chemistry approaches hold the potential for triaging potential candidates far more quickly than their experimental counterparts.
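The de novo design protocol above boils down to generate, predict, and select. The sketch below mimics that loop with a random-forest surrogate trained on random stand-in descriptors; every array here is synthetic, and the model is not the authors' DFT-trained one.

```python
# Sketch of a generate -> predict -> select screen; all data is synthetic.
import numpy as np
from sklearn.ensemble import RandomForestRegressor

rng = np.random.default_rng(1)
X_train = rng.random((500, 16))           # stand-in molecular descriptors
y_train = X_train @ rng.random(16) * 0.1  # stand-in hole reorganization energies (eV)

surrogate = RandomForestRegressor(n_estimators=100, random_state=1)
surrogate.fit(X_train, y_train)

candidates = rng.random((1000, 16))       # descriptors of "generated" molecules
pred = surrogate.predict(candidates)
best = np.argsort(pred)[:10]              # keep the 10 lowest predicted energies
print(f"lowest predicted reorganization energy: {pred[best[0]]:.4f} eV")
```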
These methods have been widely used to search for small molecules that can inhibit critical proteins involved in the SARS-CoV-2 replication cycle. An important target is the SARS-CoV-2 main protease Mpro, an enzyme that cleaves the viral polyproteins into individual proteins required for viral replication and transcription. Unfortunately, standard computational screening methods face difficulties in ranking diverse ligands to a receptor due to disparate ligand scaffolds and varying charge states. Here, we describe full density functional quantum mechanical (DFT) simulations of Mpro in complex with various ligands to obtain absolute ligand binding energies. Our calculations are enabled by a new cloud-native parallel DFT implementation running on computational resources from Amazon Web Services (AWS). The results we obtain are promising: the approach is quite capable of scoring a very diverse set of existing drug compounds for their affinities to Mpro, suggesting that the DFT approach is potentially more broadly applicable to repurposing screens against this target. In addition, each DFT simulation required only ~ 1 h (wall clock time) per ligand. The fast turnaround time raises the practical possibility of a broad application of large-scale quantum mechanics in the drug discovery pipeline at stages where ligand diversity is essential.}, } @article {pmid34326980, year = {2021}, author = {Li, X and Ren, S and Gu, F}, title = {Medical Internet of Things to Realize Elderly Stroke Prevention and Nursing Management.}, journal = {Journal of healthcare engineering}, volume = {2021}, number = {}, pages = {9989602}, pmid = {34326980}, issn = {2040-2309}, mesh = {Aged ; Cloud Computing ; Humans ; Internet ; *Internet of Things ; Middle Aged ; Remote Sensing Technology ; *Stroke/prevention & control ; *Telemedicine/methods ; }, abstract = {Stroke is a major disease that seriously endangers the lives and health of middle-aged and elderly people in our country, but the implementation of secondary prevention urgently needs improvement. The application of IoT technology in home health monitoring and telemedicine, as well as the popularization of cloud computing, contributes to the early identification of ischemic stroke and provides intelligent, humanized, and preventive medical and health services for patients at high risk of stroke. This article clarifies the networking structure and networked objects of the rehabilitation system Internet of Things, clarifies the functions of each part, and establishes an overall system architecture based on smart medical care; the design and optimization of the mechanical part of the stroke rehabilitation robot are carried out, along with kinematic and dynamic analyses. According to the functions of different types of stroke rehabilitation robots, strategies are given for the use of lower limb rehabilitation robots; standardized codes are used to identify system objects, and RFID technology is used to automatically identify users and devices. Combined with the Internet and the GSM mobile communication network, a network database of the system's networked objects is constructed and, on this basis, information management software based on a smart medical rehabilitation system that serves both doctors and patients is established to realize the system's Internet of Things architecture.
In addition, this article describes the design of the system's resource scheduling method, and the theoretical algorithm for rehabilitation strategy generation is presented and verified. This research summarizes the application background, advantages, and past practice of the Internet of Things in stroke medical care, develops and applies a medical collaborative cloud computing system for the systematic intervention of stroke, and realizes module functions such as information sharing, regional monitoring, and collaborative consultation within the base.}, } @article {pmid34326863, year = {2021}, author = {Mrozek, D and Stępień, K and Grzesik, P and Małysiak-Mrozek, B}, title = {A Large-Scale and Serverless Computational Approach for Improving Quality of NGS Data Supporting Big Multi-Omics Data Analyses.}, journal = {Frontiers in genetics}, volume = {12}, number = {}, pages = {699280}, pmid = {34326863}, issn = {1664-8021}, abstract = {Various types of analyses performed over multi-omics data are driven today by next-generation sequencing (NGS) techniques that produce large volumes of DNA/RNA sequences. Although many tools allow for parallel processing of NGS data in a Big Data distributed environment, they do not facilitate the improvement of the quality of NGS data at large scale in a simple declarative manner. Meanwhile, large sequencing projects and routine DNA/RNA sequencing associated with molecular profiling of diseases for personalized treatment require both good-quality data and appropriate infrastructure for efficient storing and processing of the data. To solve these problems, we adapt the concept of the Data Lake for storing and processing big NGS data. We also propose a dedicated library for cleaning the DNA/RNA sequences obtained with single-read and paired-end sequencing techniques. To accommodate the growth of NGS data, our solution is highly scalable on the Cloud and can rapidly and flexibly adjust to the amount of data to be processed. Moreover, to simplify the use of the data cleaning methods and the implementation of other phases of data analysis workflows, our library extends the declarative U-SQL query language, providing a set of capabilities for data extraction, processing, and storage.
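The cleaning step that the NGS entry above exposes through U-SQL can be pictured with a plain-Python analogue: trailing low-quality bases are trimmed from a read. The Phred threshold and the record are invented, and this is not the authors' library.

```python
# Sketch: quality-trimming one read; threshold and record are invented.
def trim_read(seq: str, quals: str, min_phred: int = 20) -> tuple[str, str]:
    """Trim trailing bases whose Phred+33 score falls below min_phred."""
    end = len(seq)
    while end > 0 and (ord(quals[end - 1]) - 33) < min_phred:
        end -= 1
    return seq[:end], quals[:end]

seq, quals = trim_read("ACGTACGTAA", "IIIIIIII##")  # 'I' = Phred 40, '#' = Phred 2
print(seq)  # -> 'ACGTACGT'
```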
The results of our experiments prove that the whole solution supports the requirements for ample storage and the highly parallel, scalable processing that accompanies NGS-based multi-omics data analyses.}, } @article {pmid34319675, year = {2021}, author = {Ashammakhi, N and Unluturk, BD and Kaarela, O and Akyildiz, IF}, title = {The Cells and the Implant Interact With the Biological System Via the Internet and Cloud Computing as the New Mediator.}, journal = {The Journal of craniofacial surgery}, volume = {32}, number = {5}, pages = {1655-1657}, pmid = {34319675}, issn = {1536-3732}, support = {UG3 TR003148/TR/NCATS NIH HHS/United States ; }, mesh = {*Cloud Computing ; *Dental Implants ; Internet ; Software ; }, } @article {pmid34314431, year = {2021}, author = {Niemann, M and Lachmann, N and Geneugelijk, K and Spierings, E}, title = {Computational Eurotransplant kidney allocation simulations demonstrate the feasibility and benefit of T-cell epitope matching.}, journal = {PLoS computational biology}, volume = {17}, number = {7}, pages = {e1009248}, pmid = {34314431}, issn = {1553-7358}, mesh = {Algorithms ; Cloud Computing ; Computational Biology ; Computer Simulation ; Epitopes, T-Lymphocyte/*immunology ; Europe ; Feasibility Studies ; Graft Survival/immunology ; Histocompatibility Testing/*methods/statistics & numerical data ; Humans ; Kidney Transplantation/*methods/statistics & numerical data ; Markov Chains ; Monte Carlo Method ; Time Factors ; Tissue and Organ Procurement/*methods/statistics & numerical data ; User-Computer Interface ; Waiting Lists ; }, abstract = {The EuroTransplant Kidney Allocation System (ETKAS) aims at allocating organs to patients on the waiting list fairly whilst optimizing HLA match grades. ETKAS currently considers the number of HLA-A, -B, -DR mismatches. Evidently, epitope matching is biologically and clinically more relevant. Here we executed ETKAS-based computer simulations to evaluate the impact of epitope matching on allocation and compared the strategies. A virtual population of 400,000 individuals was generated using the National Marrow Donor Program (NMDP) haplotype frequency dataset of 2011. Using this population, a waiting list of 10,400 patients was constructed and maintained during simulation, matching the 2015 Eurotransplant Annual Report characteristics. Unacceptable antigens were assigned randomly relative to their frequency using HLAMatchmaker. Over 22,600 kidneys were allocated in 10 years in triplicate using Markov Chain Monte Carlo simulations on 32-CPU-core cloud-computing instances. T-cell epitopes were calculated using the www.pirche.com portal. Waiting list effects were evaluated against ETKAS for five epitope matching scenarios. Baseline simulations of ETKAS slightly overestimated reported average HLA match grades. The best balanced scenario maintained prioritisation of HLA A-B-DR fully matched donors while replacing the HLA match grade with the PIRCHE-II score and exchanging the HLA mismatch probability (MMP) for epitope MMP. This setup showed no considerable impact on kidney exchange rates and waiting time. PIRCHE-II scores improved, whereas the average HLA match grade diminished slightly, yet estimated graft survival improved.
We conclude that epitope-based matching in deceased donor kidney allocation is feasible while maintaining equal balances on the waiting list.}, } @article {pmid34312582, year = {2021}, author = {Aslam, B and Javed, AR and Chakraborty, C and Nebhen, J and Raqib, S and Rizwan, M}, title = {Blockchain and ANFIS empowered IoMT application for privacy preserved contact tracing in COVID-19 pandemic.}, journal = {Personal and ubiquitous computing}, volume = {}, number = {}, pages = {1-17}, pmid = {34312582}, issn = {1617-4909}, abstract = {The life-threatening novel severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the cause of COVID-19, has engulfed the world and created health and economic challenges. To control the spread of COVID-19, a mechanism is required to enforce physical distancing between people. This paper proposes a Blockchain-based framework that preserves patients' anonymity while tracing their contacts with the help of Bluetooth-enabled smartphones. We use a smartphone application to interact with the proposed blockchain framework for contact tracing of the general public using Bluetooth and to store the obtained data over the cloud, which is accessible to health departments and government agencies to perform necessary and timely actions (e.g., quarantining infected people who are moving around). Thus, the proposed framework helps people perform their regular business and day-to-day activities with a controlled mechanism that keeps them safe from infected and exposed people. The smartphone application can quickly check a user's COVID status by analyzing the reported symptoms and determines whether the person is likely infected. As a result, the proposed Adaptive Neuro-Fuzzy Inference System (ANFIS) predicts the COVID status, and K-Nearest Neighbor (KNN) enhances the accuracy rate to 95.9% compared to state-of-the-art results.}, } @article {pmid34307859, year = {2021}, author = {Silva Junior, D and Pacitti, E and Paes, A and de Oliveira, D}, title = {Provenance-and machine learning-based recommendation of parameter values in scientific workflows.}, journal = {PeerJ. Computer science}, volume = {7}, number = {}, pages = {e606}, pmid = {34307859}, issn = {2376-5992}, abstract = {Scientific Workflows (SWfs) have revolutionized how scientists in various domains of science conduct their experiments. The management of SWfs is performed by complex tools that provide support for workflow composition, monitoring, execution, capturing, and storage of the data generated during execution. In some cases, they also provide components to ease the visualization and analysis of the generated data. During the workflow's composition phase, programs must be selected to perform the activities defined in the workflow specification. These programs often require additional parameters that serve to adjust the program's behavior according to the experiment's goals. Consequently, workflows commonly have many parameters to be manually configured, encompassing even more than one hundred in many cases. Choosing parameter values wrongly can crash workflow executions or produce undesired results. As the execution of data- and compute-intensive workflows is commonly performed in a high-performance computing environment (e.g., a cluster, a supercomputer, or a public cloud), an unsuccessful execution wastes time and resources.
In this article, we present FReeP (Feature Recommender from Preferences), a parameter value recommendation method designed to suggest values for workflow parameters, taking into account past user preferences. FReeP is based on Machine Learning techniques, particularly Preference Learning. FReeP is composed of three algorithms: two of them recommend the value of one parameter at a time, and the third makes recommendations for n parameters at once. The experimental results obtained with provenance data from two broadly used workflows showed FReeP's usefulness in recommending values for a single parameter. Furthermore, the results indicate the potential of FReeP to recommend values for n parameters in scientific workflows.}, } @article {pmid34307857, year = {2021}, author = {Skarlat, O and Schulte, S}, title = {FogFrame: a framework for IoT application execution in the fog.}, journal = {PeerJ. Computer science}, volume = {7}, number = {}, pages = {e588}, pmid = {34307857}, issn = {2376-5992}, abstract = {Recently, a multitude of conceptual architectures and theoretical foundations for fog computing have been proposed. Despite this, there is still a lack of concrete frameworks to set up real-world fog landscapes. In this work, we design and implement the fog computing framework FogFrame, a system able to manage and monitor edge and cloud resources in fog landscapes and to execute Internet of Things (IoT) applications. FogFrame provides communication and interaction as well as application management within a fog landscape, namely, decentralized service placement, deployment and execution. For service placement, we formalize a system model, define an objective function and constraints, and solve the problem implementing a greedy algorithm and a genetic algorithm. The framework is evaluated with regard to Quality of Service parameters of IoT applications and the utilization of fog resources using a real-world operational testbed. The evaluation shows that the service placement is adapted according to the demand and the available resources in the fog landscape. The greedy placement leads to the maximum utilization of edge devices, keeping as many services as possible at the edge, while the placement based on the genetic algorithm protects devices from overload by balancing services between the cloud and the edge. When comparing edge and cloud deployment, service deployment at the edge takes 14% of the deployment time in the cloud. If fog resources are utilized at maximum capacity and a new application request arrives with the need for certain sensor equipment, service deployment becomes impossible, and the application needs to be delegated to other fog resources. The genetic algorithm better accommodates new applications and keeps edge devices at about 50% CPU utilization.
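A first-fit greedy placement of the kind FogFrame's evaluation contrasts with its genetic algorithm can be sketched in a few lines; the device capacities and service demands below are invented.

```python
# Sketch: greedy first-fit service placement, edge first, cloud as overflow.
def place(services, edge_free, cloud="cloud"):
    """services: {name: cpu_demand}; edge_free: {device: free_cpu}."""
    placement = {}
    for name, demand in sorted(services.items(), key=lambda s: -s[1]):
        target = next((d for d, free in edge_free.items() if free >= demand), None)
        if target is not None:
            edge_free[target] -= demand  # consume edge capacity first
            placement[name] = target
        else:
            placement[name] = cloud      # no edge device fits: burst to cloud
    return placement

print(place({"sense": 0.2, "filter": 0.5, "infer": 0.9},
            {"edge-1": 1.0, "edge-2": 0.4}))
# -> {'infer': 'edge-1', 'filter': 'cloud', 'sense': 'edge-2'}
```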
During the experiments, the framework successfully reacts to runtime events: (i) services are recovered when devices disappear from the fog landscape; (ii) cloud resources and highly utilized devices are released by migrating services to new devices; and (iii) in case of overload, services are migrated to release resources.}, } @article {pmid34306054, year = {2021}, author = {Sauber, AM and Awad, A and Shawish, AF and El-Kafrawy, PM}, title = {A Novel Hadoop Security Model for Addressing Malicious Collusive Workers.}, journal = {Computational intelligence and neuroscience}, volume = {2021}, number = {}, pages = {5753948}, pmid = {34306054}, issn = {1687-5273}, mesh = {*Algorithms ; Big Data ; *Computer Communication Networks ; Humans ; }, abstract = {With data production and collection increasing daily, Hadoop has become a platform for processing big data on distributed systems. A master node globally manages running jobs, whereas worker nodes process partitions of the data locally. However, Hadoop experiences a high level of security vulnerability over hybrid and public clouds. Specifically, several workers can fake results without actually processing their portions of the data. Several redundancy-based approaches have been proposed to counteract this risk. A replication mechanism is used to duplicate all or some of the tasks over multiple workers (nodes). A drawback of such approaches is that they generate high overhead on the cluster. Additionally, malicious workers can behave well for a long period of time and attack later. This paper presents a novel model to enhance the security of the cloud environment against untrusted workers. A new component called the malicious workers' trap (MWT) is developed to run on the master node to detect malicious (noncollusive and collusive) workers as they turn malicious and attack the system. An implementation to test the proposed model and to analyze the performance of the system shows that the proposed model can accurately detect malicious workers with minor processing overhead compared to vanilla MapReduce and the Verifiable MapReduce (V-MR) model [1]. In addition, MWT maintains a balance between the security and usability of the Hadoop cluster.}, } @article {pmid34305744, year = {2021}, author = {Tariq, MU and Poulin, M and Abonamah, AA}, title = {Achieving Operational Excellence Through Artificial Intelligence: Driving Forces and Barriers.}, journal = {Frontiers in psychology}, volume = {12}, number = {}, pages = {686624}, pmid = {34305744}, issn = {1664-1078}, abstract = {This paper presents an in-depth literature review on the driving forces and barriers for achieving operational excellence through artificial intelligence (AI). Artificial intelligence is a technological concept spanning operational management, philosophy, humanities, statistics, mathematics, computer sciences, and social sciences. AI refers to machines mimicking human behavior in terms of cognitive functions. The evolution of new technological procedures and advancements in producing intelligence for machines create a positive impact on decisions, operations, strategies, and management incorporated in the production process of goods and services. Businesses develop various methods and solutions to extract meaningful information, such as big data, automatic production capabilities, and systematization for business improvement.
The progress in organizational competitiveness is apparent through improvements in firms' decisions, resulting in increased operational efficiencies. Innovation with AI has enabled small businesses to reduce operating expenses and increase revenues. The focused literature review reveals that the driving forces for achieving operational excellence through AI are improvements in the computing abilities of machines, the development of data-based AI, and advancements in deep learning, cloud computing, data management, and the integration of AI into operations. The barriers are mainly cultural constraints, fear of the unknown, lack of employee skills, and inadequate strategic planning for adopting AI. The current paper presents an analysis of articles focused on AI adoption in production and operations. We selected articles published between 2015 and 2020. Our study contributes to the literature reviews on operational excellence, artificial intelligence, driving forces for AI, and AI barriers in achieving operational excellence.}, } @article {pmid34305445, year = {2021}, author = {Sharma, SK and Ahmed, SS}, title = {IoT-based analysis for controlling & spreading prediction of COVID-19 in Saudi Arabia.}, journal = {Soft computing}, volume = {25}, number = {18}, pages = {12551-12563}, pmid = {34305445}, issn = {1432-7643}, abstract = {Presently, the novel coronavirus outbreak of 2019 (COVID-19) is a major threat to public health. Mathematical epidemic models can be utilized to forecast the course of an epidemic and devise approaches for controlling it. This paper utilizes real data on the spread of COVID-19 in Saudi Arabia for mathematical modeling and complex analyses. This paper introduces the Susceptible, Exposed, Infectious, Recovered, Undetectable, and Deceased (SEIRUD) model and a machine learning algorithm to predict and control COVID-19 in Saudi Arabia. The COVID-19 crisis has spurred the adoption of many technologies, such as cloud computing, edge computing, IoT, and artificial intelligence, and the use of sensor devices has increased enormously. Similarly, IoT applications have contributed several developments toward solving the crisis. The new technology relies on IoT variables and symptom data from wearable sensors to forecast COVID-19 cases. The working model involves wearable devices, occupational therapy, condition control, case testing, suspicious-case monitoring, and IoT elements. Mathematical modeling is useful for understanding the fundamental principles of COVID-19 transmission and providing guidance for possible predictions. The suggested method predicts whether COVID-19 will expand or die out in the population over the long term. The mathematical study results and related simulations are described here as a way of forecasting the progress and the possible end of the epidemic under three scenarios: 'No Action,' 'Lockdown,' and 'New Medicine.' The lockdown scenario delays and lowers the epidemic peak by minimizing infections and flattening the infection curve. This study presents an ideal protocol that can help the Saudi population curb the spread of COVID-19 in an accurate and timely way.
The simulations show that the suggested model achieves an accuracy of 89.3%, a prediction ratio of 88.7%, a precision of 87.7%, a recall of 86.4%, and an F1 score of 90.9%, outperforming other existing methods.}, } @article {pmid34300686, year = {2021}, author = {Huč, A and Šalej, J and Trebar, M}, title = {Analysis of Machine Learning Algorithms for Anomaly Detection on Edge Devices.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {14}, pages = {}, pmid = {34300686}, issn = {1424-8220}, mesh = {Algorithms ; *Internet of Things ; *Machine Learning ; }, abstract = {The Internet of Things (IoT) consists of small devices or a network of sensors, which permanently generate huge amounts of data. Usually, they have limited resources, either computing power or memory, which means that raw data are transferred to central systems or the cloud for analysis. Lately, the idea of moving intelligence to the IoT is becoming feasible, with machine learning (ML) moved to edge devices. The aim of this study is to provide an experimental analysis of processing a large imbalanced dataset (DS2OS), split into a training dataset (80%) and a test dataset (20%). The training dataset was reduced by randomly selecting a smaller number of samples to create new datasets Di (i = 1, 2, 5, 10, 15, 20, 40, 60, 80%). Afterwards, they were used with several machine learning algorithms to identify the size at which the performance metrics show saturation and classification results stop improving with an F1 score equal to 0.95 or higher, which happened at 20% of the training dataset. Further on, two solutions for the reduction of the number of samples to provide a balanced dataset are given. In the first, datasets DRi consist of all anomalous samples in seven classes and a reduced majority class ('NL') with i = 0.1, 0.2, 0.5, 1, 2, 5, 10, 15, 20 percent of randomly selected samples. In the second, datasets DCi are generated from the representative samples determined with clustering from the training dataset. All three dataset reduction methods showed comparable performance results. Further evaluation of training times and memory usage on the Raspberry Pi 4 shows that it is possible to run ML algorithms with limited-size datasets on edge devices.}, } @article {pmid34300671, year = {2021}, author = {Yar, H and Imran, AS and Khan, ZA and Sajjad, M and Kastrati, Z}, title = {Towards Smart Home Automation Using IoT-Enabled Edge-Computing Paradigm.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {14}, pages = {}, pmid = {34300671}, issn = {1424-8220}, mesh = {Automation ; *Delivery of Health Care ; *Privacy ; }, abstract = {Smart home applications are ubiquitous and have gained popularity due to the overwhelming use of Internet of Things (IoT)-based technology. The revolution in technologies has made homes more convenient, efficient, and even more secure. Advancement in smart home technology is necessary because few intelligent home applications cater to several aspects of the home simultaneously, i.e., automation, security, safety, and reduced energy consumption using less bandwidth, computation, and cost. Our research work provides a solution to these problems by deploying a smart home automation system with the applications mentioned above over a resource-constrained Raspberry Pi (RPI) device.
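The dataset-reduction experiment in the anomaly-detection entry above can be mimicked by training on growing fractions of the training split and watching the F1 score saturate; the sketch uses synthetic data rather than DS2OS, with a logistic regression as a stand-in classifier.

```python
# Sketch: F1 saturation as the training fraction grows (synthetic data).
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import f1_score
from sklearn.model_selection import train_test_split

X, y = make_classification(n_samples=5000, weights=[0.9], random_state=0)
X_tr, X_te, y_tr, y_te = train_test_split(X, y, test_size=0.2, random_state=0)

for frac in (0.01, 0.05, 0.2, 0.5, 1.0):
    n = max(50, int(frac * len(X_tr)))  # never train on fewer than 50 samples
    clf = LogisticRegression(max_iter=1000).fit(X_tr[:n], y_tr[:n])
    print(f"{frac:>4.0%} of training data -> F1 {f1_score(y_te, clf.predict(X_te)):.3f}")
```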
The RPI is used as a central controlling unit, which provides a cost-effective platform for interconnecting a variety of devices and various sensors in a home via the Internet. We propose a cost-effective integrated system for smart homes based on the IoT and edge-computing paradigms. The proposed system provides remote and automatic control of home appliances, ensuring security and safety. Additionally, the proposed solution uses the edge-computing paradigm to store sensitive data in a local cloud to preserve the customer's privacy. Moreover, visual and scalar sensor-generated data are processed and held on the edge device (RPI) to reduce bandwidth, computation, and storage costs. Compared with state-of-the-art solutions, the proposed system is 5% faster in detecting motion and 5 ms and 4 ms faster in switching the relay on and off, respectively. It is also 6% more efficient than the existing solutions with respect to energy consumption.}, } @article {pmid34300531, year = {2021}, author = {Kosasih, DI and Lee, BG and Lim, H and Atiquzzaman, M}, title = {An Unsupervised Learning-Based Spatial Co-Location Detection System from Low-Power Consumption Sensor.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {14}, pages = {}, pmid = {34300531}, issn = {1424-8220}, support = {2020R1A2C1008589//National Research Foundation of Korea/ ; }, mesh = {*Algorithms ; Computers, Handheld ; Humans ; Smartphone ; *Unsupervised Machine Learning ; }, abstract = {Spatial co-location detection is the task of inferring the co-location of two or more objects in geographic space. Mobile devices, especially smartphones, are commonly employed to accomplish this task for human subjects. Previous work focused on analyzing mobile GPS data to accomplish this task. While this approach may guarantee high accuracy from the perspective of the data, it is considered inefficient since knowing the object's absolute geographic location is not required to accomplish this task. This work proposes the implementation of an unsupervised learning-based algorithm, namely a convolutional autoencoder, to infer the co-location of people from low-power-consumption sensor data: magnetometer readings. The idea is that if the trained model can also reconstruct the other data with a structural similarity (SSIM) index above 0.5, we can conclude that the observed individuals were co-located. The evaluation of our system has indicated that the proposed approach could recognize the spatial co-location of people from magnetometer readings.}, } @article {pmid34300497, year = {2021}, author = {Alhasnawi, BN and Jasim, BH and Rahman, ZSA and Siano, P}, title = {A Novel Robust Smart Energy Management and Demand Reduction for Smart Homes Based on Internet of Energy.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {14}, pages = {}, pmid = {34300497}, issn = {1424-8220}, mesh = {*Algorithms ; *Cloud Computing ; Computers ; Internet ; }, abstract = {In residential energy management (REM), Time-of-Use (ToU) scheduling of devices based on user-defined preferences is an essential task performed by the home energy management controller. This paper devises a robust REM technique capable of monitoring and controlling residential loads within a smart home. In this paper, a new distributed multi-agent framework based on the cloud layer computing architecture is developed for real-time microgrid economic dispatch and monitoring.
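The SSIM decision rule from the co-location entry above (declare co-location when the index exceeds 0.5) is easy to show in isolation; the sketch below compares two synthetic "magnetometer images" with scikit-image and deliberately skips the autoencoder reconstruction step.

```python
# Sketch: SSIM-thresholded co-location test on synthetic 2-D signals.
import numpy as np
from skimage.metrics import structural_similarity

rng = np.random.default_rng(0)
person_a = rng.random((32, 32))                        # stand-in magnetometer "image"
person_b = person_a + rng.normal(0.0, 0.05, (32, 32))  # similar field -> co-located

score = structural_similarity(
    person_a, person_b, data_range=person_b.max() - person_b.min()
)
print(f"SSIM={score:.2f}, co-located={score > 0.5}")
```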
In this paper, a Time-of-Use (ToU) pricing model based on the grey wolf optimizer (GWO) and the artificial bee colony (ABC) optimization algorithm is proposed to define the rates for shoulder-peak and on-peak hours. The results illustrate the effectiveness of the proposed GWO- and ABC-based ToU pricing scheme. A Raspberry Pi 3-based model of a well-known test grid topology is modified to support real-time communication with the open-source IoE platform Node-RED, used for cloud computing. A two-level communication system connects the microgrid system, implemented on the Raspberry Pi 3, to the cloud server. The local communication level utilizes TCP/IP, while MQTT is used as the protocol at the global communication level. The results demonstrate and validate the effectiveness of the proposed technique, as well as its capability to track load changes through real-time interactions and its fast convergence rate.}, } @article {pmid34300454, year = {2021}, author = {Stan, OP and Enyedi, S and Corches, C and Flonta, S and Stefan, I and Gota, D and Miclea, L}, title = {Method to Increase Dependability in a Cloud-Fog-Edge Environment.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {14}, pages = {}, pmid = {34300454}, issn = {1424-8220}, mesh = {*Algorithms ; *Cloud Computing ; }, abstract = {Robots can be very different, from humanoids to intelligent self-driving cars or just IoT systems that collect and process local sensor information. This paper presents a way to increase dependability for information exchange and processing in systems with Cloud-Fog-Edge architectures. In an ideal interconnected world, the recognized and registered robots must be able to communicate with each other if they are close enough, or through the Fog access points without overloading the Cloud. In essence, the presented work addresses the Edge area and how the devices can communicate in a safe and secure environment using cryptographic methods for structured systems. The presented work emphasizes the importance of security in a system's dependability and offers a communication mechanism for several robots without overburdening the Cloud. This solution is ideal for use where various monitoring and control aspects demand extra degrees of safety. The extra private keys employed by this procedure further enhance algorithm complexity, limiting the probability that the method may be broken by brute force or systemic attacks.}, } @article {pmid34300439, year = {2021}, author = {Brescia, E and Costantino, D and Marzo, F and Massenio, PR and Cascella, GL and Naso, D}, title = {Automated Multistep Parameter Identification of SPMSMs in Large-Scale Applications Using Cloud Computing Resources.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {14}, pages = {}, pmid = {34300439}, issn = {1424-8220}, mesh = {*Cloud Computing ; *Computers ; Humans ; }, abstract = {Parameter identification of permanent magnet synchronous machines (PMSMs) represents a well-established research area. However, parameter estimation of multiple running machines in large-scale applications has not yet been investigated. In this context, a flexible and automated approach is required to minimize complexity, costs, and human interventions without requiring machine information. This paper proposes a novel identification strategy for surface PMSMs (SPMSMs), highly suitable for large-scale systems.
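The global MQTT level of the energy-management entry above amounts to publishing local readings to a cloud broker. A one-shot publish could look like the sketch below; the broker hostname, topic, and payload are invented, and the call is written against the paho-mqtt publish helper.

```python
# Sketch: one-shot MQTT publish of a microgrid reading (values invented).
import json
import paho.mqtt.publish as publish

reading = {"node": "rpi3-1", "load_kw": 1.8, "tariff": "on-peak"}
publish.single("microgrid/readings", json.dumps(reading),
               hostname="broker.example.org", qos=1)  # hypothetical cloud broker
```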
A novel multistep approach using measurement data at different operating conditions of the SPMSM is proposed to perform the parameter identification without requiring signal injection, extra sensors, machine information, or human intervention. Thus, the proposed method overcomes numerous issues of existing parameter identification schemes. An IoT/cloud architecture is designed to implement the proposed multistep procedure and perform SPMSM parameter identifications at scale. Finally, hardware-in-the-loop results show the effectiveness of the proposed approach.}, } @article {pmid34283824, year = {2021}, author = {Hanussek, M and Bartusch, F and Krüger, J}, title = {Performance and scaling behavior of bioinformatic applications in virtualization environments to create awareness for the efficient use of compute resources.}, journal = {PLoS computational biology}, volume = {17}, number = {7}, pages = {e1009244}, pmid = {34283824}, issn = {1553-7358}, mesh = {Algorithms ; Benchmarking ; Cloud Computing ; Computational Biology/*methods/standards/statistics & numerical data ; Computers ; Computing Methodologies ; Data Interpretation, Statistical ; Databases, Factual/statistics & numerical data ; High-Throughput Nucleotide Sequencing ; Humans ; Image Interpretation, Computer-Assisted ; Machine Learning ; Sequence Alignment ; Software ; User-Computer Interface ; }, abstract = {The large amount of biological data available today makes it necessary to use tools and applications based on sophisticated and efficient algorithms developed in the area of bioinformatics. Further, access to high-performance computing resources is necessary to achieve results in reasonable time. To speed up applications and utilize available compute resources as efficiently as possible, software developers make use of parallelization mechanisms such as multithreading. Many of the available tools in bioinformatics offer multithreading capabilities, but more compute power is not always helpful. In this study, we investigated the behavior of well-known bioinformatics applications with our benchmarking tool suite BOOTABLE, regarding their performance in terms of scaling, different virtual environments, and different datasets. The tool suite includes the tools BBMap, Bowtie2, BWA, Velvet, IDBA, SPAdes, Clustal Omega, MAFFT, SINA and GROMACS. In addition, we added an application using the machine learning framework TensorFlow. Machine learning is not directly part of bioinformatics but is applied to many biological problems, especially in the context of medical images (X-ray photographs). The mentioned tools have been analyzed in two different virtual environments: a virtual machine environment based on the OpenStack cloud software and a Docker environment. The measured performance values were compared to a bare-metal setup and to each other. The study reveals that the virtual environments used produce an overhead in the range of seven to twenty-five percent compared to the bare-metal environment. The scaling measurements showed that some of the analyzed tools do not benefit from using larger amounts of computing resources, whereas others showed an almost linear scaling behavior. The findings of this study have been generalized as far as possible and should help users to find the best amount of resources for their analysis.
Further, the results provide valuable information for resource providers to handle their resources as efficiently as possible and raise the user community's awareness of the efficient usage of computing resources.}, } @article {pmid34283149, year = {2021}, author = {Zeng, X and Zhang, X and Yang, S and Shi, Z and Chi, C}, title = {Gait-Based Implicit Authentication Using Edge Computing and Deep Learning for Mobile Devices.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {13}, pages = {}, pmid = {34283149}, issn = {1424-8220}, support = {61802252//National Natural Science Foundation of China/ ; }, mesh = {*Biometric Identification ; Computers, Handheld ; *Deep Learning ; Gait ; Privacy ; }, abstract = {Implicit authentication mechanisms are expected to prevent security and privacy threats for mobile devices using behavior modeling. However, researchers have recently demonstrated that the performance of behavioral biometrics is insufficiently accurate. Furthermore, the unique characteristics of mobile devices, such as limited storage and energy, make them subject to constrained data collection and processing capacity. In this paper, we propose an implicit authentication architecture based on edge computing, coined Edge computing-based mobile Device Implicit Authentication (EDIA), which exploits edge-based gait biometric identification using a deep learning model to authenticate users. The gait data captured by a device's accelerometer and gyroscope sensors is utilized as the input of our optimized model, which consists of a CNN and an LSTM in tandem. In particular, we extract the features of the gait signal in a two-dimensional domain by converting the original signal into an image, which is then input into our network. In addition, to reduce the computation overhead of mobile devices, the model for implicit authentication is generated on the cloud server, and the user authentication process takes place on the edge devices. We evaluate the performance of EDIA under different scenarios, where the results show that i) we achieve a true positive rate of 97.77% and a false positive rate of 2%; and ii) EDIA still reaches high accuracy with a limited dataset size.}, } @article {pmid34283139, year = {2021}, author = {Alwateer, M and Almars, AM and Areed, KN and Elhosseini, MA and Haikal, AY and Badawy, M}, title = {Ambient Healthcare Approach with Hybrid Whale Optimization Algorithm and Naïve Bayes Classifier.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {13}, pages = {}, pmid = {34283139}, issn = {1424-8220}, mesh = {*Algorithms ; Animals ; Bayes Theorem ; Big Data ; Delivery of Health Care ; *Whales ; }, abstract = {There is a crucial need to process patients' data immediately to make sound decisions rapidly; this data is very large and has an excessive number of features. Recently, many cloud-based IoT healthcare systems have been proposed in the literature. However, there are still several challenges associated with the processing time and overall system efficiency concerning big healthcare data. This paper introduces a novel approach for processing healthcare data and predicting useful information at minimum computational cost. The main objective is to accept several types of data, improve accuracy, and reduce the processing time. The proposed approach uses a hybrid algorithm that consists of two phases.
The first phase aims to minimize the number of features for big data by using the Whale Optimization Algorithm as a feature selection technique. After that, the second phase performs real-time data classification using the Naïve Bayes Classifier. The proposed approach is based on fog computing for better business agility, better security, deeper insights with privacy, and reduced operating cost. The experimental results demonstrate that the proposed approach can reduce the number of dataset features, improve accuracy, and reduce processing time. Accuracy is enhanced by an average of 3.6% (3.34 for Diabetes, 2.94 for Heart disease, 3.77 for Heart attack prediction, and 4.15 for Sonar). It also enhances processing speed, reducing processing time by an average of 8.7% (28.96 for Diabetes, 1.07 for Heart disease, 3.31 for Heart attack prediction, and 1.4 for Sonar).}, } @article {pmid34283112, year = {2021}, author = {Agapiou, A and Lysandrou, V}, title = {Observing Thermal Conditions of Historic Buildings through Earth Observation Data and Big Data Engine.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {13}, pages = {}, pmid = {34283112}, issn = {1424-8220}, support = {INTEGRATED/0918/0034//This research was undertaken under the PERIsCOPE INTEGRATED/0918/0034 (Portal for heritage buildings integration into the contemporary built environment) is co-financed by the European Regional Development Fund and the Republic of Cyprus through the Resea/ ; }, abstract = {This study combines satellite observation, cloud platforms, and geographical information systems (GIS) to investigate, at a macro-scale level of observation, the thermal conditions of two historic clusters in Cyprus, namely the Limassol and Strovolos municipalities. The two case studies have different environmental and climatic conditions: the former site is coastal, the latter inland, and both contain historic buildings with similar building materials and techniques. For the needs of the study, more than 140 Landsat 7 ETM+ and 8 LDCM images were processed on the Google Earth Engine big data cloud platform to investigate the thermal conditions of the two historic clusters over the period 2013-2020. The multi-temporal thermal analysis included the calibration of all images to provide land surface temperature (LST) products at a 100 m spatial resolution. Moreover, to investigate anomalies related to possible land cover changes of the area, two indices were extracted from the satellite images: the normalised difference vegetation index (NDVI) and the normalised difference built-up index (NDBI). Anticipated results include the macro-scale identification of multi-temporal changes, diachronic changes, and the establishment of change patterns based on seasonality and location occurring in large clusters of historic buildings.}, } @article {pmid34283102, year = {2021}, author = {Moon, J and Yang, M and Jeong, J}, title = {A Novel Approach to the Job Shop Scheduling Problem Based on the Deep Q-Network in a Cooperative Multi-Access Edge Computing Ecosystem.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {13}, pages = {}, pmid = {34283102}, issn = {1424-8220}, support = {IITP-2021-2020-0-01821//Ministry of Science and ICT, South Korea/ ; }, mesh = {*Cloud Computing ; *Ecosystem ; }, abstract = {In this study, based on multi-access edge computing (MEC), we enable cooperation among manufacturing processes.
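The two-phase healthcare pipeline described above (metaheuristic feature selection, then Naïve Bayes) can be outlined with a random mask search standing in for the Whale Optimization Algorithm; the data below is synthetic.

```python
# Sketch: feature-mask search (WOA stand-in) followed by Naive Bayes.
import numpy as np
from sklearn.datasets import make_classification
from sklearn.model_selection import cross_val_score
from sklearn.naive_bayes import GaussianNB

X, y = make_classification(n_samples=600, n_features=20, n_informative=5,
                           random_state=0)
rng = np.random.default_rng(0)

best_mask, best_score = None, -1.0
for _ in range(30):                      # WOA would search this space more cleverly
    mask = rng.random(X.shape[1]) < 0.5  # random subset of features
    if not mask.any():
        continue
    score = cross_val_score(GaussianNB(), X[:, mask], y, cv=3).mean()
    if score > best_score:
        best_mask, best_score = mask, score

print(f"kept {int(best_mask.sum())}/20 features, CV accuracy={best_score:.3f}")
```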
We address the job shop scheduling problem by applying a deep Q-network (DQN), a reinforcement learning model. To alleviate the load on computing resources, an efficient DQN trained with transfer learning data was used in the experiments. Additionally, we conducted scheduling studies in the edge computing ecosystem of our manufacturing processes without the help of cloud centers. Cloud computing, the environment in which scheduling is usually performed, has issues that are sensitive for manufacturing processes in general, such as security and communication delay, and research in various fields is exploring edge computing systems that can replace it. We propose a method of performing scheduling independently at the edge of the network through cooperative scheduling between edge devices within a multi-access edge computing structure. The proposed framework was evaluated, analyzed, and compared with existing frameworks in terms of the solutions and services it provides.}, } @article {pmid34283100, year = {2021}, author = {Chen, L and Grimstead, I and Bell, D and Karanka, J and Dimond, L and James, P and Smith, L and Edwardes, A}, title = {Estimating Vehicle and Pedestrian Activity from Town and City Traffic Cameras.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {13}, pages = {}, pmid = {34283100}, issn = {1424-8220}, support = {EP/P016782/1//Engineering and Physical Sciences Research Council (EPSRC) UK/ ; EP/R013411/1//Engineering and Physical Sciences Research Council (EPSRC) UK/ ; NE/P017134/1//Natural Environment Research Council (NERC)/ ; }, mesh = {Accidents, Traffic/prevention & control ; *COVID-19 ; Cities ; Humans ; *Pedestrians ; Reproducibility of Results ; SARS-CoV-2 ; Safety ; }, abstract = {Traffic cameras are a widely available source of open data that offer tremendous value to public authorities by providing real-time statistics to understand and monitor the activity levels of local populations and their responses to policy interventions such as those seen during the COrona VIrus Disease 2019 (COVID-19) pandemic. This paper presents an end-to-end solution based on the Google Cloud Platform with scalable processing capability to deal with large volumes of traffic camera data across the UK in a cost-efficient manner. It describes a deep learning pipeline to detect pedestrians and vehicles and to generate mobility statistics from these. It includes novel methods for data cleaning and post-processing using a Structural SIMilarity (SSIM)-based static mask that improves reliability and accuracy in classifying people and vehicles from traffic camera images. The solution resulted in statistics describing trends in the 'busyness' of various towns and cities in the UK. We validated time series against Automatic Number Plate Recognition (ANPR) cameras across North East England, showing a close correlation between our statistical output and the ANPR source. Trends were also favorably compared against traffic flow statistics from the UK's Department for Transport.
The results of this work have been adopted by the Office for National Statistics (ONS) as an experimental faster indicator of the impact of COVID-19 on the UK economy and society.}, } @article {pmid34282786, year = {2021}, author = {Ali, A and Iqbal, MM and Jamil, H and Qayyum, F and Jabbar, S and Cheikhrouhou, O and Baz, M and Jamil, F}, title = {An Efficient Dynamic-Decision Based Task Scheduler for Task Offloading Optimization and Energy Management in Mobile Cloud Computing.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {13}, pages = {}, pmid = {34282786}, issn = {1424-8220}, mesh = {*Algorithms ; *Cloud Computing ; Computers ; Computers, Handheld ; }, abstract = {The restricted abilities of mobile devices in terms of storage, computation, time, energy supply, and transmission cause issues related to energy optimization and time management while processing tasks on mobile phones. This issue pertains to multifarious mobile device-related dimensions, including mobile cloud computing, fog computing, and edge computing. Moreover, mobile devices' dearth of storage and processing power gives rise to several issues for optimal energy and time management. These problems complicate task retention and offloading on mobile devices. This paper presents a novel task scheduling algorithm that addresses energy consumption and execution time by proposing an energy-efficient dynamic decision-based method. The proposed model quickly adapts to cloud computing tasks and to the energy and time computation of mobile devices. Furthermore, we present a novel task scheduling server that performs the offloading computation process on the cloud, enhancing the mobile device's decision-making ability and computational performance during task offloading. The process of task scheduling harnesses the proposed empirical algorithm. The outcomes of this study enable effective task scheduling wherein energy consumption and scheduling time are significantly reduced.}, } @article {pmid34276264, year = {2021}, author = {Risco, S and Moltó, G and Naranjo, DM and Blanquer, I}, title = {Serverless Workflows for Containerised Applications in the Cloud Continuum.}, journal = {Journal of grid computing}, volume = {19}, number = {3}, pages = {30}, pmid = {34276264}, issn = {1572-9184}, abstract = {This paper introduces an open-source platform to support serverless computing for scientific data-processing workflow-based applications across the Cloud continuum (i.e. simultaneously involving both on-premises and public Cloud platforms to process data captured at the edge). This is achieved via dynamic resource provisioning for FaaS platforms compatible with scale-to-zero approaches that minimise resource usage and cost for dynamic workloads with different elasticity requirements. The platform combines the usage of dynamically deployed auto-scaled Kubernetes clusters on on-premises Clouds and automated Cloud bursting into AWS Lambda to achieve higher levels of elasticity. A public-health use case for smart cities, detecting people not wearing face masks in captured videos, is used to assess the platform. Faces are blurred for enhanced anonymity in the on-premises Cloud, and detection via Deep Learning models is performed in AWS Lambda for this data-driven containerised workflow.
The results indicate that hybrid workflows across the Cloud continuum can efficiently perform local data processing for enhanced regulatory compliance and perform Cloud bursting for increased levels of elasticity.}, } @article {pmid34261111, year = {2021}, author = {Worrell, GA}, title = {Electrical Brain Stimulation for Epilepsy and Emerging Applications.}, journal = {Journal of clinical neurophysiology : official publication of the American Electroencephalographic Society}, volume = {38}, number = {6}, pages = {471-477}, doi = {10.1097/WNP.0000000000000819}, pmid = {34261111}, issn = {1537-1603}, mesh = {Brain ; *Deep Brain Stimulation ; *Epilepsy/therapy ; Humans ; *Mental Disorders/therapy ; Stereotaxic Techniques ; }, abstract = {Electrical brain stimulation is an established therapy for movement disorders, epilepsy, obsessive compulsive disorder, and a potential therapy for many other neurologic and psychiatric disorders. Despite significant progress and FDA approvals, there remain significant clinical gaps that can be addressed with next generation systems. Integrating wearable sensors and implantable brain devices with off-the-body computing resources (smart phones and cloud resources) opens a new vista for dense behavioral and physiological signal tracking coupled with adaptive stimulation therapy that should have applications for a range of brain and mind disorders. Here, we briefly review some history and current electrical brain stimulation applications for epilepsy, deep brain stimulation and responsive neurostimulation, and emerging applications for next generation devices and systems.}, } @article {pmid34257851, year = {2021}, author = {Guo, B and Ma, Y and Yang, J and Wang, Z}, title = {Smart Healthcare System Based on Cloud-Internet of Things and Deep Learning.}, journal = {Journal of healthcare engineering}, volume = {2021}, number = {}, pages = {4109102}, pmid = {34257851}, issn = {2040-2309}, mesh = {*Deep Learning ; Delivery of Health Care ; Humans ; *Internet of Things ; Neural Networks, Computer ; *Wearable Electronic Devices ; }, abstract = {INTRODUCTION: Health monitoring and remote diagnosis can be realized through Smart Healthcare. In view of existing problems such as the simple measurement parameters of wearable devices, the huge computing pressure on cloud servers, and the lack of individualized diagnosis, a novel Cloud-Internet of Things (C-IOT) framework for medical monitoring is put forward.

METHODS: Smartphones are adopted as gateway devices to standardize and preprocess the data, generating a health gray-scale map that is uploaded to the cloud server. The cloud server handles the business logic and uses a deep learning model to compute health parameters from the gray-scale map. A deep learning model based on a convolutional neural network (CNN) is constructed; six volunteers participate in the experiment, and their health data are labeled by private doctors to generate the initial data set.
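The abstract does not give the exact CNN architecture, so the following is only a minimal sketch of the kind of model described: a small convolutional network classifying one-channel health gray-scale maps. The input resolution (64x64) and the number of health-status classes (4) are assumptions.

```python
# Illustrative CNN for classifying health gray-scale maps (PyTorch).
import torch
import torch.nn as nn

class HealthMapCNN(nn.Module):
    def __init__(self, num_classes: int = 4):   # class count is assumed
        super().__init__()
        self.features = nn.Sequential(
            nn.Conv2d(1, 16, 3, padding=1), nn.ReLU(), nn.MaxPool2d(2),
            nn.Conv2d(16, 32, 3, padding=1), nn.ReLU(), nn.MaxPool2d(2),
        )
        self.classifier = nn.Linear(32 * 16 * 16, num_classes)

    def forward(self, x):
        return self.classifier(self.features(x).flatten(1))

model = HealthMapCNN()
logits = model(torch.randn(8, 1, 64, 64))   # a batch of 8 gray-scale maps
print(logits.shape)                          # torch.Size([8, 4])
```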

RESULTS: Experimental results show the feasibility of the proposed framework. The trained CNN model is evaluated on the test data set; the forecast accuracy exceeds 77.6%.

CONCLUSION: The CNN model performs well in the recognition of health status. Collectively, this Smart Healthcare System is expected to assist doctors by improving the diagnosis of health status in clinical practice.}, } @article {pmid34256646, year = {2021}, author = {Morales-Botello, ML and Gachet, D and de Buenaga, M and Aparicio, F and Busto, MJ and Ascanio, JR}, title = {Chronic patient remote monitoring through the application of big data and internet of things.}, journal = {Health informatics journal}, volume = {27}, number = {3}, pages = {14604582211030956}, doi = {10.1177/14604582211030956}, pmid = {34256646}, issn = {1741-2811}, mesh = {Big Data ; Cloud Computing ; Humans ; Internet ; *Internet of Things ; Monitoring, Physiologic ; *Telemedicine ; }, abstract = {Chronic patients could benefit from technological advances, but the clinical approaches for this kind of patient are still limited. This paper describes a system for monitoring chronic patients in both home and external environments. For this purpose, we used novel technologies such as big data, cloud computing and the internet of things (IoT). Additionally, the system has been validated for three use cases: cardiovascular disease (CVD), hypertension (HPN) and chronic obstructive pulmonary disease (COPD), which were selected for their incidence in the population. This system is innovative within e-health, mainly due to its use of a big data architecture based on open-source components, and it also provides a scalable and distributed environment for the storage and processing of biomedical sensor data. The proposed system enables the incorporation of non-medical data sources in order to improve the self-management of chronic diseases and to develop better strategies for health interventions for chronic and dependent patients.}, } @article {pmid34250607, year = {2021}, author = {Miras Del Río, H and Ortiz Lora, A and Bertolet Reina, A and Terrón León, JA}, title = {A Monte Carlo dose calculation system for ophthalmic brachytherapy based on a realistic eye model.}, journal = {Medical physics}, volume = {48}, number = {8}, pages = {4542-4559}, doi = {10.1002/mp.15045}, pmid = {34250607}, issn = {2473-4209}, mesh = {*Brachytherapy ; *Eye Neoplasms/radiotherapy ; Humans ; Monte Carlo Method ; Phantoms, Imaging ; Radiotherapy Dosage ; Radiotherapy Planning, Computer-Assisted ; }, abstract = {PURPOSE: There is a growing trend towards the adoption of model-based calculation algorithms (MBDCAs) for brachytherapy dose calculations, which can properly handle media and source/applicator heterogeneities. However, most dose calculations in ocular plaque therapy are based on homogeneous water media and standard in-silico ocular phantoms, ignoring the non-water equivalency of the anatomic tissues and heterogeneities in applicators and patient anatomy. In this work, we introduce EyeMC, a Monte Carlo (MC) model-based calculation algorithm for ophthalmic plaque brachytherapy using realistic and adaptable patient-specific eye geometries and materials.

METHODS: We used the MC code PENELOPE in EyeMC to model Bebig IsoSeed I25.S16 seeds in COMS plaques and [106] Ru/[106] Rh applicators that are coupled onto a customizable eye model with realistic geometry and composition. To significantly reduce calculation times, we integrated EyeMC with CloudMC, a cloud computing platform for radiation therapy calculations. EyeMC is equipped with an evaluation module that allows the generation of isodose distributions, dose-volume histograms, and comparisons with the Plaque Simulator three-dimensional dose distribution. We selected a sample of patients treated with [125] I and [106] Ru isotopes in our institution, covering a variety of different types of plaques, tumor sizes, and locations. Results from EyeMC were compared to the original plan calculated by the TPS Plaque Simulator, studying the influence of heterogeneous media composition as well.
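One concrete piece of such an evaluation module is the dose-volume histogram. As a hedged illustration (not EyeMC's code; array shapes and binning are arbitrary), a cumulative DVH can be computed from a 3D dose grid and a structure mask as follows:

```python
# Sketch: cumulative dose-volume histogram (DVH) from a 3D dose grid.
import numpy as np

def cumulative_dvh(dose, mask, num_bins=200):
    """Return (dose_levels, volume_fraction) for voxels where mask is True."""
    d = dose[mask]
    levels = np.linspace(0.0, d.max(), num_bins)
    # Fraction of the structure receiving at least each dose level.
    volume = np.array([(d >= lv).mean() for lv in levels])
    return levels, volume

rng = np.random.default_rng(0)
dose = rng.gamma(2.0, 20.0, size=(40, 40, 40))   # synthetic dose grid (Gy)
mask = np.zeros_like(dose, dtype=bool)
mask[10:20, 10:20, 10:20] = True                  # synthetic structure
levels, vol = cumulative_dvh(dose, mask)
print(levels[50], vol[50])
```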

RESULTS: EyeMC calculations for Ru plaques agreed well with the manufacturer's reference data and with MC simulation data from Hermida et al. (2013). Significant deviations, up to 20%, were only found in lateral profiles for notched plaques. As expected, media composition significantly affected estimated doses to different eye structures, especially in the [125] I cases evaluated. Doses to the sclera and lens were found to be about 12% lower when considering real media, while the average dose to the tumor was 9% higher. [106] Ru cases presented a 1%-3% dose reduction in all structures using real media for calculation, except for the lens, which showed an average dose 7.6% lower than water-based calculations. Comparisons with Plaque Simulator calculations showed large differences in dose to critical structures for [106] Ru notched plaques. [125] I cases presented significant and systematic dose deviations when using the default calculation parameters from Plaque Simulator version 5.3.8, which were corrected when using calculation parameters from a custom physics model for carrier-attenuation and air-interface correction functions.

CONCLUSIONS: EyeMC is an MC calculation system for ophthalmic brachytherapy based on a realistic and customizable eye-tumor model which includes the main eye structures with their real composition. Integrating this tool into a cloud computing environment makes it possible to perform high-precision MC calculations of ocular plaque treatments in a short time. The observed variability in eye anatomy among the selected cases justifies the use of patient-specific models.}, } @article {pmid34249300, year = {2021}, author = {Zhou, C and Hu, J and Chen, N}, title = {Remote Care Assistance in Emergency Department Based on Smart Medical.}, journal = {Journal of healthcare engineering}, volume = {2021}, number = {}, pages = {9971960}, pmid = {34249300}, issn = {2040-2309}, mesh = {*Artificial Intelligence ; Big Data ; *Cloud Computing ; Emergency Service, Hospital ; Humans ; Remote Sensing Technology ; }, abstract = {Smart medical care is user-centric: with medical information as its main thread, it uses big data, Internet of Things, cloud computing, artificial intelligence, and other technologies to establish a scientific, accurate, efficient, and reasonable medical service system. Smart medical care plays an important role in alleviating doctor-patient conflicts caused by information asymmetry, reducing regional health differences caused by irrational allocation of medical resources, and improving medical service levels. This article mainly introduces a remote care assistance system for the emergency department based on smart medical care and intends to provide some ideas and directions for technical research on remote care for emergency department patients. This paper proposes a research method for remote care assistance in emergency departments based on smart medical care, including an overview of remote care, real-time monitoring algorithms for remote care sensors, signal detection algorithms, and signal clustering algorithms. These methods were applied in experiments on remote care assistance in the emergency department. The experimental results show that 86.0% of patients like the remote care system based on smart medical care studied in this paper.}, } @article {pmid34249298, year = {2021}, author = {Zhao, X and Liu, J and Ji, B and Wang, L}, title = {Service Migration Policy Optimization considering User Mobility for E-Healthcare Applications.}, journal = {Journal of healthcare engineering}, volume = {2021}, number = {}, pages = {9922876}, pmid = {34249298}, issn = {2040-2309}, mesh = {Algorithms ; *Cloud Computing ; Humans ; Public Policy ; *Telemedicine ; }, abstract = {Mobile edge computing (MEC) is an emerging technology that provides cloud services at the edge of the network to enable latency-critical and resource-intensive E-healthcare applications. User mobility is common in MEC and can result in an interruption of ongoing edge services and a dramatic drop in quality of service. Service migration has great potential to address these issues, but it brings inevitable costs for the system. In this paper, we propose a service migration solution based on migration zones and formulate the service migration cost with a comprehensive model that captures the key challenges. Then, we formulate the service migration problem as a Markov decision process to obtain optimal service migration policies that decide where to migrate within a limited area. We propose three algorithms to resolve the optimization problem given by the formulated model.
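To illustrate the kind of formulation just described, the sketch below casts a toy migration problem as an MDP and solves it with value iteration. The state space, cost model, and transition rule are illustrative assumptions, not the authors' formulation.

```python
# Toy MDP: the state is the user-to-service distance (hops) inside a
# migration zone; actions are stay (0) or migrate (1); costs trade off
# latency (grows with distance) against a one-off migration cost.
import numpy as np

N_STATES, MIGRATE_COST, GAMMA = 5, 2.0, 0.9

def step_cost(s, a):
    # migrate: pay the migration cost; stay: pay a latency cost of s.
    return MIGRATE_COST if a else float(s)

def next_state(s, a):
    # The user drifts one hop further away per slot unless we migrate.
    return 0 if a else min(s + 1, N_STATES - 1)

V = np.zeros(N_STATES)
for _ in range(200):   # value iteration to a (near) fixed point
    V = np.array([min(step_cost(s, a) + GAMMA * V[next_state(s, a)]
                      for a in (0, 1)) for s in range(N_STATES)])
policy = [int(np.argmin([step_cost(s, a) + GAMMA * V[next_state(s, a)]
                         for a in (0, 1)])) for s in range(N_STATES)]
print(policy)   # 1 marks states where migrating is cheaper in the long run
```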
Finally, we demonstrate the performance of our proposed algorithms by carrying out extensive experiments. We show that the proposed service migration approach reduces the total cost by up to 3 times compared to no migration and outperforms the general solution in terms of the total expected reward.}, } @article {pmid34228752, year = {2021}, author = {Qu, N and You, W}, title = {Design and fault diagnosis of DCS sintering furnace's temperature control system for edge computing.}, journal = {PloS one}, volume = {16}, number = {7}, pages = {e0253246}, pmid = {34228752}, issn = {1932-6203}, abstract = {Under the background of modern industrial processing and production, the sintering furnace's temperature control system is researched to achieve intelligent smelting and reduce energy consumption. First, the specific application and implementation of edge computing in industrial processing and production are analyzed. The industrial processing and production intelligent equipment based on edge computing includes the equipment layer, the edge layer, and the cloud platform layer. This architecture improves the operating efficiency of the intelligent control system. Then, the sintering furnace in the metallurgical industry is taken as an example. The sintering furnace connects powder material particles at high temperatures; thus, the core temperature control system is investigated. Under the actual sintering furnace engineering design, the Distributed Control System (DCS) is used as the basis of sintering furnace temperature control, and the Programmable Logic Controller (PLC) is adopted to reduce the electrical wiring and switch contacts. The hardware circuit of DCS is designed; on this basis, an embedded operating system with excellent performance is transplanted according to functional requirements. The final DCS-based temperature control system is applied to actual monitoring. The real-time temperature of the upper, middle, and lower currents of 1# sintering furnace at a particular point is measured to be 56.95°C, 56.58°C, and 57.2°C, respectively. The real-time temperature of the upper, middle, and lower currents of 2# sintering furnaces at a particular point is measured to be 144.7°C, 143.8°C, and 144.0°C, respectively. Overall, the temperature control deviation of the three currents of the two sintering furnaces stays in the controllable range. An expert system based on fuzzy logic in the fault diagnosis system can comprehensively predict the situation of the sintering furnaces. The prediction results of the sintering furnace's faults are closer to the actual situation compared with the fault diagnosis method based on the Backpropagation (BP) neural network. The designed system makes up for the shortcomings of the sintering furnace's traditional temperature control systems and can control the temperature of the sintering furnace intelligently and scientifically. 
Besides, it can diagnose equipment faults in a timely and efficient manner, thereby improving sintering efficiency.}, } @article {pmid34227850, year = {2021}, author = {Qin, J and Mei, G and Ma, Z and Piccialli, F}, title = {General Paradigm of Edge-Based Internet of Things Data Mining for Geohazard Prevention.}, journal = {Big data}, volume = {9}, number = {5}, pages = {373-389}, doi = {10.1089/big.2020.0392}, pmid = {34227850}, issn = {2167-647X}, mesh = {Cloud Computing ; Data Mining ; Humans ; *Internet of Things ; }, abstract = {Geological hazards (geohazards) are geological processes or phenomena formed under externally induced factors that cause losses to human life and property. Geohazards are sudden, cause great harm, and have broad ranges of influence, which bring considerable challenges to geohazard prevention. Monitoring and early warning are the most common strategies to prevent geohazards. With the development of the internet of things (IoT), IoT-based monitoring devices provide rich and fine data, making geohazard monitoring and early warning more accurate and effective. IoT-based monitoring data can be transmitted to a cloud center for processing to provide credible data references for geohazard early warning. However, the massive numbers of IoT devices occupy most resources of the cloud center, which increases the data processing delay. Moreover, limited bandwidth restricts the transmission of large amounts of geohazard monitoring data. Thus, in some cases, cloud computing is not able to meet the real-time requirements of geohazard early warning. Edge computing technology processes data closer to the data source than to the cloud center, which provides the opportunity for the rapid processing of monitoring data. This article presents the general paradigm of edge-based IoT data mining for geohazard prevention, especially monitoring and early warning. The paradigm mainly includes data acquisition, data mining and analysis, and data interpretation. Moreover, a real case is used to illustrate the details of the presented general paradigm. Finally, this article discusses several key problems for the general paradigm of edge-based IoT data mining for geohazard prevention.}, } @article {pmid34226796, year = {2022}, author = {Shin, H and Lee, K and Kwon, HY}, title = {A comparative experimental study of distributed storage engines for big spatial data processing using GeoSpark.}, journal = {The Journal of supercomputing}, volume = {78}, number = {2}, pages = {2556-2579}, pmid = {34226796}, issn = {0920-8542}, abstract = {With increasing numbers of GPS-equipped mobile devices, we are witnessing a deluge of spatial information that needs to be effectively and efficiently managed. Even though there are several distributed spatial data processing systems such as GeoSpark (Apache Sedona), the effects of underlying storage engines have not been well studied for spatial data processing. In this paper, we evaluate the performance of various distributed storage engines for processing large-scale spatial data using GeoSpark, a state-of-the-art distributed spatial data processing system running on top of Apache Spark. For our performance evaluation, we choose three distributed storage engines having different characteristics: (1) HDFS, (2) MongoDB, and (3) Amazon S3. To conduct our experimental study in a real cloud computing environment, we utilize Amazon EMR instances (up to 6 instances) for distributed spatial data processing.
For the evaluation of big spatial data processing, we generate data sets considering four kinds of data distributions and various data sizes up to one billion point records (38.5 GB raw size). Through extensive experiments, we measure the processing time of storage engines with the following variations: (1) sharding strategies in MongoDB, (2) caching effects, (3) data distributions, (4) data set sizes, (5) the number of running executors and storage nodes, and (6) the selectivity of queries. The major points observed from the experiments are summarized as follows. (1) The overall performance of MongoDB-based GeoSpark is degraded compared to HDFS- and S3-based GeoSpark in our experimental settings. (2) The performance of MongoDB-based GeoSpark is relatively improved in large-scale data sets compared to the others. (3) HDFS- and S3-based GeoSpark are more scalable to running executors and storage nodes compared to MongoDB-based GeoSpark. (4) The sharding strategy based on spatial proximity significantly improves the performance of MongoDB-based GeoSpark. (5) S3- and HDFS-based GeoSpark show similar performances in all the environmental settings. (6) Caching in distributed environments improves the overall performance of spatial data processing. These results can usefully inform the choice of the most adequate storage engine for big spatial data processing in a target distributed environment.}, } @article {pmid34220289, year = {2022}, author = {Singh, VK and Kolekar, MH}, title = {Deep learning empowered COVID-19 diagnosis using chest CT scan images for collaborative edge-cloud computing platform.}, journal = {Multimedia tools and applications}, volume = {81}, number = {1}, pages = {3-30}, pmid = {34220289}, issn = {1380-7501}, abstract = {The novel coronavirus outbreak has spread worldwide, causing respiratory infections in humans and leading to the huge global COVID-19 pandemic. According to the World Health Organization, the only way to curb this spread is by increasing testing and isolating the infected. Meanwhile, the clinical testing currently being followed is not easily accessible and requires much time to give results. In this scenario, remote diagnostic systems could become a handy solution. Some existing studies leverage the deep learning approach to provide an effective alternative to clinical diagnostic techniques. However, it is difficult to use such complex networks in resource-constrained environments. To address this problem, we developed a fine-tuned deep learning model inspired by the architecture of the MobileNet V2 model. Moreover, the developed model is further optimized in terms of its size and complexity to make it compatible with mobile and edge devices. The results of extensive experimentation performed on a real-world dataset consisting of 2482 chest Computerized Tomography scan images strongly suggest the superiority of the developed fine-tuned deep learning model in terms of high accuracy and faster diagnosis time. The proposed model achieved a classification accuracy of 96.40%, with approximately ten times shorter response time than prevailing deep learning models.
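The transfer-learning recipe implied by a fine-tuned model "inspired by the architecture of the MobileNet V2 model" is standard; a hedged torchvision sketch follows. The freezing policy and the two-class head are assumptions, and the torchvision 0.13+ weights API is assumed.

```python
# Sketch: adapt a pretrained MobileNet V2 to a 2-class chest-CT task.
import torch.nn as nn
from torchvision import models

model = models.mobilenet_v2(weights=models.MobileNet_V2_Weights.DEFAULT)
for p in model.features.parameters():   # freeze the convolutional backbone
    p.requires_grad = False
# Replace the classification head: 1280 features -> {COVID, non-COVID}.
model.classifier[1] = nn.Linear(model.last_channel, 2)
```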
Further, McNemar's statistical test results also confirm the efficacy of the proposed model.}, } @article {pmid34219861, year = {2021}, author = {Mandal, S and Khan, DA and Jain, S}, title = {Cloud-Based Zero Trust Access Control Policy: An Approach to Support Work-From-Home Driven by COVID-19 Pandemic.}, journal = {New generation computing}, volume = {39}, number = {3-4}, pages = {599-622}, pmid = {34219861}, issn = {0288-3635}, abstract = {Ubiquitous cloud computing services provide a new paradigm for the work-from-home environment adopted by enterprises in the unprecedented crisis of the COVID-19 outbreak. However, the change in work culture also increases the chances of cybersecurity attacks such as MAC spoofing and DDoS/DoS attacks, owing to the divergent incoming traffic from untrusted networks accessing the enterprise's resources. Networks are usually unable to detect spoofing if the intruder has already forged the host's MAC address. However, the techniques used in existing research mistakenly classify the malicious host as a legitimate one. This paper proposes a novel access control policy based on a zero-trust network by explicitly restricting the incoming network traffic to substantiate MAC spoofing attacks in the software-defined network (SDN) paradigm of cloud computing. The multiplicative increase and additive decrease algorithm helps to detect an advanced MAC spoofing attack before it penetrates the SDN-based cloud resources. Based on the proposed approach, a dynamic threshold is assigned to the incoming port number. The self-learning feature of the threshold stamping helps to rectify a legitimate user's traffic before classifying it as an attacker's. Finally, the mathematical and experimental results exhibit higher accuracy and detection rates than the existing methodologies. The novelty of this approach strengthens the security of the SDN paradigm of cloud resources by redefining conventional access control policy.}, } @article {pmid34211547, year = {2021}, author = {Ni, L and Sun, X and Li, X and Zhang, J}, title = {GCWOAS2: Multiobjective Task Scheduling Strategy Based on Gaussian Cloud-Whale Optimization in Cloud Computing.}, journal = {Computational intelligence and neuroscience}, volume = {2021}, number = {}, pages = {5546758}, pmid = {34211547}, issn = {1687-5273}, mesh = {Algorithms ; Animals ; *Cloud Computing ; Normal Distribution ; *Whales ; }, abstract = {An important challenge facing cloud computing is how to correctly and effectively handle and serve millions of users' requests. Efficient task scheduling in cloud computing can intuitively affect the resource configuration and operating cost of the entire system. However, task and resource scheduling in a cloud computing environment is an NP-hard problem. In this paper, we propose a three-layer scheduling model based on whale-Gaussian cloud. In the second layer of the model, a whale optimization strategy based on the Gaussian cloud model (GCWOAS2) is used for multiobjective task scheduling in a cloud computing environment, which aims to minimize task completion time by effectively utilizing virtual machine resources and to keep the load of each virtual machine balanced, reducing the operating cost of the system. In the GCWOAS2 strategy, an opposition-based learning mechanism is first used to initialize the scheduling strategy to generate the optimal scheduling scheme. Then, an adaptive mobility factor is proposed to dynamically expand the search range.
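For reference, GCWOAS2 builds on the standard whale optimization algorithm, whose encircling and spiral position updates (in Mirjalili and Lewis's original formulation) are reproduced below; the Gaussian-cloud modification of the search step is not shown here.

```latex
% Standard WOA updates. X* is the best whale found so far; r is uniform
% in [0,1]; a decreases linearly from 2 to 0 over the iterations.
\vec{D} = \lvert \vec{C}\cdot\vec{X}^{*}(t) - \vec{X}(t)\rvert, \qquad
\vec{X}(t+1) = \vec{X}^{*}(t) - \vec{A}\cdot\vec{D},
\quad \vec{A} = 2\vec{a}\cdot\vec{r} - \vec{a}, \quad \vec{C} = 2\vec{r}
% Spiral (bubble-net) update, with shape constant b and l uniform in [-1,1]:
\vec{X}(t+1) = \vec{D}'\, e^{bl}\cos(2\pi l) + \vec{X}^{*}(t), \qquad
\vec{D}' = \lvert \vec{X}^{*}(t) - \vec{X}(t)\rvert
```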
The whale optimization algorithm based on the Gaussian cloud model is proposed to enhance the randomness of search. Finally, a multiobjective task scheduling algorithm based on Gaussian whale-cloud optimization (GCWOA) is presented, so that the entire scheduling strategy can not only expand the search range but also jump out of the local maximum and obtain the global optimal scheduling strategy. Experimental results show that compared with other existing metaheuristic algorithms, our strategy can not only shorten the task completion time but also balance the load of virtual machine resources, and at the same time, it also has a better performance in resource utilization.}, } @article {pmid34209509, year = {2021}, author = {Pinheiro, A and Canedo, ED and Albuquerque, RO and de Sousa Júnior, RT}, title = {Validation of Architecture Effectiveness for the Continuous Monitoring of File Integrity Stored in the Cloud Using Blockchain and Smart Contracts.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {13}, pages = {}, pmid = {34209509}, issn = {1424-8220}, mesh = {*Blockchain ; Cloud Computing ; Technology ; }, abstract = {The management practicality and economy offered by the various technological solutions based on cloud computing have attracted many organizations, which have chosen to migrate services to the cloud, despite the numerous challenges arising from this migration. Cloud storage services are emerging as a relevant solution to meet the legal requirements of maintaining custody of electronic documents for long periods. However, the possibility of losses and the consequent financial damage require the permanent monitoring of this information. In a previous work named "Monitoring File Integrity Using Blockchain and Smart Contracts", the authors proposed an architecture based on blockchain, smart contract, and computational trust technologies that allows the periodic monitoring of the integrity of files stored in the cloud. However, the experiments carried out in the initial studies that validated the architecture included only small- and medium-sized files. As such, this paper presents a validation of the architecture to determine its effectiveness and efficiency when storing large files for long periods. The article provides an improved and detailed description of the proposed processes, followed by a security analysis of the architecture. The results of both the validation experiments and the implemented defense mechanism analysis confirm the security and the efficiency of the architecture in identifying corrupted files, regardless of file size and storage time.}, } @article {pmid34209400, year = {2021}, author = {Zhou, H and Zhang, W and Wang, C and Ma, X and Yu, H}, title = {BBNet: A Novel Convolutional Neural Network Structure in Edge-Cloud Collaborative Inference.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {13}, pages = {}, pmid = {34209400}, issn = {1424-8220}, support = {61976098//Natural Science Foundation of China/ ; 2020C067//Technology Development Foundation of Quanzhou City/ ; }, mesh = {*Data Compression ; *Neural Networks, Computer ; }, abstract = {Edge-cloud collaborative inference can significantly reduce the delay of a deep neural network (DNN) by dividing the network between mobile edge and cloud. However, the in-layer data size of DNN is usually larger than the original data, so the communication time to send intermediate data to the cloud will also increase end-to-end latency. 
To cope with these challenges, this paper proposes a novel convolutional neural network structure, BBNet, that accelerates collaborative inference on two levels: (1) through channel pruning, reducing the number of calculations and parameters of the original network; (2) through compressing the feature map at the split point to further reduce the size of the data transmitted. In addition, this paper implements the BBNet structure on an NVIDIA Nano device and a server. Compared with the original network, BBNet achieves compression rates of up to 5.67× in FLOPs and 11.57× in parameters, respectively. In the best case, the feature compression layer can reach a bit-compression rate of 512×. BBNet's latency advantage is more pronounced when network conditions are poor than under better bandwidth conditions. For example, when the upload bandwidth is only 20 kb/s, BBNet improves end-to-end latency by a factor of 38.89 compared with the cloud-only approach.}, } @article {pmid34207851, year = {2021}, author = {Mendez, J and Molina, M and Rodriguez, N and Cuellar, MP and Morales, DP}, title = {Camera-LiDAR Multi-Level Sensor Fusion for Target Detection at the Network Edge.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {12}, pages = {}, pmid = {34207851}, issn = {1424-8220}, support = {Y01 ES001027/ES/NIEHS NIH HHS/United States ; }, mesh = {*Algorithms ; Automobiles ; Lasers ; *Machine Learning ; }, abstract = {There have been significant advances regarding target detection in the autonomous vehicle context. To develop more robust systems that can overcome weather hazards as well as sensor problems, the sensor fusion approach is taking the lead in this context. Laser Imaging Detection and Ranging (LiDAR) and camera sensors are two of the most used sensors for this task since they can accurately provide important features such as a target's depth and shape. However, most of the current state-of-the-art target detection algorithms for autonomous cars do not take into consideration the hardware limitations of the vehicle, such as reduced computing power in comparison with Cloud servers, as well as the need for low latency. In this work, we propose Edge Computing Tensor Processing Unit (TPU) devices as hardware support due to their computing capabilities for machine learning algorithms as well as their reduced power consumption. We developed an accurate and small target detection model for these devices. Our proposed Multi-Level Sensor Fusion model has been optimized for the network edge, specifically for the Google Coral TPU.
As a result, high accuracy results are obtained while reducing the memory consumption as well as the latency of the system using the challenging KITTI dataset.}, } @article {pmid34207675, year = {2021}, author = {Caminero, AC and Muñoz-Mansilla, R}, title = {Quality of Service Provision in Fog Computing: Network-Aware Scheduling of Containers.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {12}, pages = {}, pmid = {34207675}, issn = {1424-8220}, support = {FILE (2019V/PUNED/004)//Universidad Nacional de Educación a Distancia/ ; SMARTTRAFFIC (2019V/EUIN-UNED/003)//Universidad Nacional de Educación a Distancia/ ; SNOLA, RED2018-102725-T//Ministerio de Economía, Industria y Competitividad, Gobierno de España/ ; e-Madrid-CM (P2018/TCS-4307)//Comunidad de Madrid/ ; }, mesh = {Algorithms ; *Cloud Computing ; *Internet of Things ; }, abstract = {State-of-the-art scenarios, such as Internet of Things (IoT) and Smart Cities, have recently arisen. They involve the processing of huge data sets under strict time requirements, rendering the use of cloud resources unfeasible. For this reason, Fog computing has been proposed as a solution; however, there remains a need for intelligent allocation decisions, in order to make it a fully usable solution in such contexts. In this paper, a network-aware scheduling algorithm is presented, which aims to select the fog node most suitable for the execution of an application within a given deadline. This decision is made taking the status of the network into account. This scheduling algorithm was implemented as an extension to the Kubernetes default scheduler, and compared with existing proposals in the literature. The comparison shows that our proposal is the only one that can execute all the submitted jobs within their deadlines (i.e., no job is rejected or executed exceeding its deadline) with certain configurations in some of the scenarios tested, thus obtaining an optimal solution in such scenarios.}, } @article {pmid34207511, year = {2021}, author = {Pauca, O and Maxim, A and Caruntu, CF}, title = {Multivariable Optimisation for Waiting-Time Minimisation at Roundabout Intersections in a Cyber-Physical Framework.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {12}, pages = {}, pmid = {34207511}, issn = {1424-8220}, support = {PN-III-P1-1.1-TE-2019-1123//Unitatea Executiva pentru Finantarea Invatamantului Superior, a Cercetarii, Dezvoltarii si Inovarii/ ; PN-III-P1-1.1-PD-2019-0757//Unitatea Executiva pentru Finantarea Invatamantului Superior, a Cercetarii, Dezvoltarii si Inovarii/ ; }, mesh = {*Accidents, Traffic ; Humans ; Safety ; *Waiting Lists ; }, abstract = {The evolution of communication networks offers new possibilities for development in the automotive industry. Smart vehicles will benefit from the possibility of connecting with the infrastructure and from an extensive exchange of data between them. Furthermore, new control strategies can be developed that benefit the advantages of these communication networks. In this endeavour, the main purposes considered by the automotive industry and researchers from academia are defined by: (i) ensuring people's safety; (ii) reducing the overall costs, and (iii) improving the traffic by maximising the fluidity. In this paper, a cyber-physical framework (CPF) to control the access of vehicles in roundabout intersections composed of two levels is proposed. 
Both levels correspond to the cyber part of the CPF, while the physical part is composed of the vehicles crossing the roundabout. The first level, i.e., the edge-computing layer, is based on an analytical solution that uses multivariable optimisation to minimise the waiting times of the vehicles entering a roundabout intersection and to ensure a safe crossing. The second level, i.e., the cloud-computing layer, stores information about the waiting times and trajectories of all the vehicles that cross the roundabout and uses them for long-term analysis and prediction. The simulated results show the efficacy of the proposed method, which can be easily implemented on an embedded device for real-time operation.}, } @article {pmid34200488, year = {2021}, author = {Bao, Y and Lin, P and Li, Y and Qi, Y and Wang, Z and Du, W and Fan, Q}, title = {Parallel Structure from Motion for Sparse Point Cloud Generation in Large-Scale Scenes.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {11}, pages = {}, pmid = {34200488}, issn = {1424-8220}, support = {ZR2020MF132//Natural Science Foundation of Shandong Province/ ; 62072020//National Natural Science Foundation of China/ ; 2017YFB1002602//National Key R&D Program of China/ ; No.VRLAB2019A03//Open Project Program of State Key Laboratory of Virtual Reality Technology and Systems, Beihang University/ ; No.19-3-2-21-zhc//Qingdao Leading Scholars Project on Innovation and Entrepreneurship 2019/ ; }, abstract = {Scene reconstruction uses images or videos as input to reconstruct a 3D model of a real scene and has important applications in smart cities, surveying and mapping, military, and other fields. Structure from motion (SFM) is a key step in scene reconstruction, which recovers sparse point clouds from image sequences. However, large-scale scenes cannot be reconstructed using a single compute node. Image matching and geometric filtering take up a lot of time in the traditional SFM problem. In this paper, we propose a novel divide-and-conquer framework to solve the distributed SFM problem. First, we use the global navigation satellite system (GNSS) information from images to calculate the GNSS neighborhood. The number of images matched is greatly reduced by matching each image to only valid GNSS neighbors. This way, a robust matching relationship can be obtained. Second, the calculated matching relationship is used as the initial camera graph, which is divided into multiple subgraphs by the clustering algorithm. The local SFM is executed on several computing nodes to register the local cameras. Finally, all of the local camera poses are integrated and optimized to complete the global camera registration. 
Experiments show that our system can accurately and efficiently solve the structure from motion problem in large-scale scenes.}, } @article {pmid34200090, year = {2021}, author = {Kuaban, GS and Atmaca, T and Kamli, A and Czachórski, T and Czekalski, P}, title = {Performance Analysis of Packet Aggregation Mechanisms and Their Applications in Access (e.g., IoT, 4G/5G), Core, and Data Centre Networks.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {11}, pages = {}, pmid = {34200090}, issn = {1424-8220}, mesh = {*Cloud Computing ; }, abstract = {The transmission of massive amounts of small packets generated by access networks through high-speed Internet core networks to other access networks or cloud computing data centres has introduced several challenges such as poor throughput, underutilisation of network resources, and higher energy consumption. Therefore, it is essential to develop strategies to deal with these challenges. One of them is to aggregate smaller packets into a larger payload packet; these groups of aggregated packets share the same header, hence increasing throughput, improving resource utilisation, and reducing energy consumption. This paper presents a review of packet aggregation applications in access networks (e.g., IoT and 4G/5G mobile networks), optical core networks, and cloud computing data centre networks. Then we propose new analytical models based on diffusion approximation for the evaluation of the performance of packet aggregation mechanisms. We demonstrate the use of measured traffic from real networks to evaluate the performance of packet aggregation mechanisms analytically. The use of diffusion approximation allows us to consider time-dependent queueing models with general interarrival and service time distributions. These models are therefore more general than those presented to date.}, } @article {pmid34199981, year = {2021}, author = {Nouh, R and Singh, M and Singh, D}, title = {SafeDrive: Hybrid Recommendation System Architecture for Early Safety Predication Using Internet of Vehicles.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {11}, pages = {}, pmid = {34199981}, issn = {1424-8220}, mesh = {*Accidents, Traffic/prevention & control ; *Automobile Driving ; Internet ; Risk Factors ; Safety ; Technology ; }, abstract = {The Internet of vehicles (IoV) is a rapidly emerging technological evolution of Intelligent Transportation System (ITS). This paper proposes SafeDrive, a dynamic driver profile (DDP) using a hybrid recommendation system. DDP is a set of functional modules that analyse individual drivers' behaviors, using prior violation and accident records to identify driving risk patterns. In this paper, we have considered three synthetic data-sets for 1500 drivers based on their profile information, risk parameters information, and risk likelihood. In addition, we have also considered the drivers' historical violation/accident data-set records based on four risk-score levels, namely high-risk, medium-risk, low-risk, and no-risk, to predict current and future driver risk scores. Several error calculation methods have been applied in this study to analyze our proposed hybrid recommendation system's performance in classifying the drivers' data with higher accuracy based on various criteria. The evaluated results help to improve driving behavior and to broadcast early-warning alarms to other vehicles in the IoV environment for overall road safety.
Moreover, the proposed model helps to provide a safe and predictable environment for vehicles, pedestrians, and road objects, with the help of regular monitoring of vehicle motion, driver behavior, and road conditions. It also enables accurate prediction of accidents beforehand and minimizes the complexity of on-road vehicles and latency due to fog/cloud computing servers.}, } @article {pmid34199831, year = {2021}, author = {Wang, Q and Su, M and Zhang, M and Li, R}, title = {Integrating Digital Technologies and Public Health to Fight Covid-19 Pandemic: Key Technologies, Applications, Challenges and Outlook of Digital Healthcare.}, journal = {International journal of environmental research and public health}, volume = {18}, number = {11}, pages = {}, pmid = {34199831}, issn = {1660-4601}, support = {18YJA790081//Social Science Fund of Ministry of Education of China/ ; Grant No. ZR2018MG016//Natural Science Foundation of Shandong Province, China/ ; }, mesh = {Artificial Intelligence ; *COVID-19 ; China/epidemiology ; Delivery of Health Care ; Digital Technology ; Humans ; *Pandemics/prevention & control ; Public Health ; SARS-CoV-2 ; }, abstract = {The integration of digital technologies and public health (or digital healthcare) helps us to fight the Coronavirus Disease 2019 (COVID-19) pandemic, which is the biggest public health crisis humanity has faced since the 1918 Influenza Pandemic. In order to better understand digital healthcare, this work conducted a systematic and comprehensive review of digital healthcare, with the purpose of helping us combat the COVID-19 pandemic. This paper covers the background information and research overview of digital healthcare, summarizes its applications and challenges in the COVID-19 pandemic, and finally puts forward the prospects of digital healthcare. First, the main concepts, key development processes, and common application scenarios of integrating digital technologies and digital healthcare were offered in the background section. Second, bibliometric techniques were used to analyze the research output, geographic distribution, discipline distribution, collaboration network, and hot topics of digital healthcare before and after the COVID-19 pandemic. We found that the COVID-19 pandemic has greatly accelerated research on the integration of digital technologies and healthcare. Third, application cases from China, the EU, and the U.S. using digital technologies to fight the COVID-19 pandemic were collected and analyzed. Among these digital technologies, big data, artificial intelligence, cloud computing, and 5G are the most effective weapons to combat the COVID-19 pandemic. Application cases show that these technologies play an irreplaceable role in controlling the spread of COVID-19. By comparing the application cases in these three regions, we contend that the key to China's success in avoiding the second wave of the COVID-19 pandemic was to integrate digital technologies and public health on a large scale without hesitation. Fourth, the application challenges of digital technologies in the public health field are summarized. These challenges mainly come from four aspects: data delays, data fragmentation, privacy security, and data security vulnerabilities. Finally, this study provides the future application prospects of digital healthcare.
In addition, we also provide policy recommendations for other countries that use digital technology to combat COVID-19.}, } @article {pmid34198526, year = {2021}, author = {Kim, J and Lee, J and Kim, T}, title = {AdaMM: Adaptive Object Movement and Motion Tracking in Hierarchical Edge Computing System.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {12}, pages = {}, pmid = {34198526}, issn = {1424-8220}, support = {2018-0-01502//Ministry of Science and ICT, South Korea/ ; GCU-202008450004//Gachon University research fund of 2020/ ; }, abstract = {This paper presents a novel adaptive object movement and motion tracking (AdaMM) framework in a hierarchical edge computing system for achieving GPU memory footprint reduction of deep learning (DL)-based video surveillance services. DL-based object movement and motion tracking requires a significant amount of resources, such as (1) GPU processing power for the inference phase and (2) GPU memory for model loading. Despite the absence of an object in the video, if the DL model is loaded, the GPU memory must be kept allocated for the loaded model. Moreover, in several cases, video surveillance tries to capture events that rarely occur (e.g., abnormal object behaviors); therefore, such standby GPU memory might be easily wasted. To alleviate this problem, the proposed AdaMM framework categorizes the tasks used for the object movement and motion tracking procedure in an increasing order of the required processing and memory resources as task (1) frame difference calculation, task (2) object detection, and task (3) object motion and movement tracking. The proposed framework aims to adaptively release the unnecessary standby object motion and movement tracking model to save GPU memory by utilizing light tasks, such as frame difference calculation and object detection in a hierarchical manner. Consequently, object movement and motion tracking are adaptively triggered if the object is detected within the specified threshold time; otherwise, the GPU memory for the model of task (3) can be released. Moreover, object detection is also adaptively performed if the frame difference over time is greater than the specified threshold. We implemented the proposed AdaMM framework using commercial edge devices by considering a three-tier system, such as the 1st edge node for both tasks (1) and (2), the 2nd edge node for task (3), and the cloud for sending a push alarm. A measurement-based experiment reveals that the proposed framework achieves a maximum GPU memory reduction of 76.8% compared to the baseline system, while requiring a 2680 ms delay for loading the model for object movement and motion tracking.}, } @article {pmid34192136, year = {2020}, author = {Miseikis, J and Caroni, P and Duchamp, P and Gasser, A and Marko, R and Miseikiene, N and Zwilling, F and de Castelbajac, C and Eicher, L and Fruh, M and Fruh, H}, title = {Lio-A Personal Robot Assistant for Human-Robot Interaction and Care Applications.}, journal = {IEEE robotics and automation letters}, volume = {5}, number = {4}, pages = {5339-5346}, pmid = {34192136}, issn = {2377-3766}, abstract = {Lio is a mobile robot platform with a multi-functional arm explicitly designed for human-robot interaction and personal care assistant tasks. The robot has already been deployed in several health care facilities, where it is functioning autonomously, assisting staff and patients on an everyday basis. 
Lio is intrinsically safe by having full coverage in soft artificial-leather material as well as collision detection, limited speed and forces. Furthermore, the robot has a compliant motion controller. A combination of visual, audio, laser, ultrasound and mechanical sensors are used for safe navigation and environment understanding. The ROS-enabled setup allows researchers to access raw sensor data as well as have direct control of the robot. The friendly appearance of Lio has resulted in the robot being well accepted by health care staff and patients. Fully autonomous operation is made possible by a flexible decision engine, autonomous navigation and automatic recharging. Combined with time-scheduled task triggers, this allows Lio to operate throughout the day, with a battery life of up to 8 hours and recharging during idle times. A combination of powerful computing units provides enough processing power to deploy artificial intelligence and deep learning-based solutions on-board the robot without the need to send any sensitive data to cloud services, guaranteeing compliance with privacy requirements. During the COVID-19 pandemic, Lio was rapidly adjusted to perform additional functionality like disinfection and remote elevated body temperature detection. It complies with ISO13482 - Safety requirements for personal care robots, meaning it can be directly tested and deployed in care facilities.}, } @article {pmid34185678, year = {2021}, author = {Fedorov, A and Longabaugh, WJR and Pot, D and Clunie, DA and Pieper, S and Aerts, HJWL and Homeyer, A and Lewis, R and Akbarzadeh, A and Bontempi, D and Clifford, W and Herrmann, MD and Höfener, H and Octaviano, I and Osborne, C and Paquette, S and Petts, J and Punzo, D and Reyes, M and Schacherer, DP and Tian, M and White, G and Ziegler, E and Shmulevich, I and Pihl, T and Wagner, U and Farahani, K and Kikinis, R}, title = {NCI Imaging Data Commons.}, journal = {Cancer research}, volume = {81}, number = {16}, pages = {4188-4193}, pmid = {34185678}, issn = {1538-7445}, support = {P41 EB015898/EB/NIBIB NIH HHS/United States ; HHSN261201500003C/CA/NCI NIH HHS/United States ; HHSN261201000031C/CA/NCI NIH HHS/United States ; HHSN261201500001C/CA/NCI NIH HHS/United States ; HHSN261201500001G/CA/NCI NIH HHS/United States ; HHSN261201500003I/CA/NCI NIH HHS/United States ; P41 EB028741/EB/NIBIB NIH HHS/United States ; HHSN261201500001W/CA/NCI NIH HHS/United States ; }, mesh = {Biomedical Research/trends ; Cloud Computing ; Computational Biology/methods ; Computer Graphics ; Computer Security ; Data Interpretation, Statistical ; Databases, Factual ; Diagnostic Imaging/*methods/standards ; Humans ; Image Processing, Computer-Assisted ; *National Cancer Institute (U.S.) ; Neoplasms/*diagnostic imaging/*genetics ; Pilot Projects ; Programming Languages ; Radiology/methods/standards ; Reproducibility of Results ; Software ; United States ; User-Computer Interface ; }, abstract = {The National Cancer Institute (NCI) Cancer Research Data Commons (CRDC) aims to establish a national cloud-based data science infrastructure. Imaging Data Commons (IDC) is a new component of CRDC supported by the Cancer Moonshot. The goal of IDC is to enable a broad spectrum of cancer researchers, with and without imaging expertise, to easily access and explore the value of deidentified imaging data and to support integrated analyses with nonimaging data. 
We achieve this goal by colocating versatile imaging collections with cloud-based computing resources and data exploration, visualization, and analysis tools. The IDC pilot was released in October 2020 and is being continuously populated with radiology and histopathology collections. IDC provides access to curated imaging collections, accompanied by documentation, a user forum, and a growing number of analysis use cases that aim to demonstrate the value of a data commons framework applied to cancer imaging research. SIGNIFICANCE: This study introduces NCI Imaging Data Commons, a new repository of the NCI Cancer Research Data Commons, which will support cancer imaging research on the cloud.}, } @article {pmid34185062, year = {2021}, author = {Park, S and Lee, D and Kim, Y and Lim, S and Chae, H and Kim, S}, title = {BioVLAB-Cancer-Pharmacogenomics: tumor heterogeneity and pharmacogenomics analysis of multi-omics data from tumor on the cloud.}, journal = {Bioinformatics (Oxford, England)}, volume = {38}, number = {1}, pages = {275-277}, doi = {10.1093/bioinformatics/btab478}, pmid = {34185062}, issn = {1367-4811}, support = {//Collaborative Genome Program for Fostering New Post-Genome Industry of the National Research Foundation (NRF)/ ; NRF-2014M3C9A3063541//Ministry of Science and ICT (MSIT)/ ; //Korea Health Technology R&D Project through the Korea Health Industry Development Institute (KHIDI)/ ; //Ministry of Health & Welfare/ ; HI15C3224//Republic of Korea/ ; //Bio & Medical Technology Development Program of the National Research Foundation (NRF)/ ; NRF-2019M3E5D4065965//Ministry of Science & ICT/ ; //Bio & Medical Technology Development Program of the National Research Foundation (NRF)/ ; NRF-2019M3E5D307337511//Ministry of Science & ICT/ ; }, mesh = {Humans ; Female ; *Software ; Multiomics ; Pharmacogenetics ; *Breast Neoplasms/drug therapy/genetics ; Databases, Factual ; }, abstract = {MOTIVATION: Multi-omics data in molecular biology has accumulated rapidly over the years. Such data contains valuable information for research in medicine and drug discovery. Unfortunately, data-driven research in medicine and drug discovery is challenging for a majority of small research labs due to the large volume of data and the complexity of the analysis pipeline.

RESULTS: We present BioVLAB-Cancer-Pharmacogenomics, a bioinformatics system that facilitates the analysis of breast cancer multi-omics data to investigate intratumor heterogeneity and pharmacogenomics on Amazon Web Services. Our system takes multi-omics data as input to perform tumor heterogeneity analysis against TCGA data and to deconvolve and match tumor gene expression to cell line data in CCLE using DNA methylation profiles. We believe that our system can help small research labs perform analysis of tumor multi-omics without worrying about computational infrastructure and the maintenance of databases and tools.

AVAILABILITY AND IMPLEMENTATION: http://biohealth.snu.ac.kr/software/biovlab_cancer_pharmacogenomics.

SUPPLEMENTARY INFORMATION: Supplementary data are available at Bioinformatics online.}, } @article {pmid34183361, year = {2021}, author = {Leu, MG and Weinberg, ST and Monsen, C and Lehmann, CU}, title = {Web Services and Cloud Computing in Pediatric Care.}, journal = {Pediatrics}, volume = {148}, number = {1}, pages = {}, doi = {10.1542/peds.2021-052048}, pmid = {34183361}, issn = {1098-4275}, mesh = {Bilirubin/blood ; Child ; *Cloud Computing ; Computer Security ; Confidentiality ; Consumer Health Information/organization & administration ; Decision Support Systems, Clinical/organization & administration ; Electronic Health Records/*organization & administration ; Humans ; Immunization ; Nomograms ; Pediatrics/*organization & administration ; Practice Guidelines as Topic ; Programming Languages ; *Web Browser ; }, abstract = {Electronic health record (EHR) systems do not uniformly implement pediatric-supportive functionalities. One method of adding these capabilities across EHR platforms is to integrate Web services and Web applications that may perform decision support and store data in the cloud when the EHR platform is able to integrate Web services. Specific examples of these services are described, such as immunization clinical decision support services, consumer health resources, and bilirubin nomograms. Health care providers, EHR vendors, and developers share responsibilities in the appropriate development, integration, and use of Web services and Web applications as they relate to best practices in the areas of data security and confidentiality, technical availability, audit trails, terminology and messaging standards, compliance with the Health Insurance Portability and Accountability Act, testing, usability, and other considerations. It is desirable for health care providers to have knowledge of Web services and Web applications that can improve pediatric capabilities in their own EHRs because this will naturally inform discussions concerning EHR features and facilitate implementation and subsequent use of these capabilities by clinicians caring for children.}, } @article {pmid34177116, year = {2022}, author = {Ahanger, TA and Tariq, U and Nusir, M and Aldaej, A and Ullah, I and Sulman, A}, title = {A novel IoT-fog-cloud-based healthcare system for monitoring and predicting COVID-19 outspread.}, journal = {The Journal of supercomputing}, volume = {78}, number = {2}, pages = {1783-1806}, pmid = {34177116}, issn = {0920-8542}, abstract = {The rapid transmission of viral illnesses is an emerging public health issue across the globe. Among these, COVID-19 is currently viewed as the most critical and novel infection. The current investigation gives an effective framework for the monitoring and prediction of COVID-19 virus infection (C-19VI). To the best of our knowledge, no research work has focused on incorporating IoT technology to track C-19 outspread over spatiotemporal patterns. Moreover, limited work has been done on predicting C-19 in humans for controlling the spread of COVID-19. The proposed framework includes a four-level architecture for the prediction and prevention of COVID-19 contamination. The presented model comprises the COVID-19 Data Collection (C-19DC), COVID-19 Information Classification (C-19IC), COVID-19 Mining and Extraction (C-19ME), and COVID-19 Prediction and Decision Modeling (C-19PDM) levels.
Specifically, the presented model is used to empower a person or community to intermittently screen the COVID-19 Fever Measure (C-19FM) and to forecast it so that proactive measures can be taken in advance. Additionally, for predictive purposes, the probabilistic examination of C-19VI is quantified as a degree of membership, which is cumulatively characterized as the C-19FM. Moreover, the prediction is realized utilizing a temporal recurrent neural network. Additionally, based on the self-organized mapping technique, the presence of C-19VI is determined over a geographical area. Simulation is performed over four challenging datasets. In contrast to other strategies, altogether improved outcomes in terms of classification efficiency, prediction viability, and reliability were registered for the introduced model.}, } @article {pmid34177036, year = {2022}, author = {Singh, A and Jindal, V and Sandhu, R and Chang, V}, title = {A scalable framework for smart COVID surveillance in the workplace using Deep Neural Networks and cloud computing.}, journal = {Expert systems}, volume = {39}, number = {3}, pages = {e12704}, pmid = {34177036}, issn = {1468-0394}, abstract = {A smart and scalable system is required to schedule various machine learning applications to control pandemics like COVID-19 using the computing infrastructure provided by cloud and fog computing. This paper proposes a framework that considers the use case of smart office surveillance to monitor workplaces for detecting possible violations of COVID-19 safety protocols effectively. The proposed framework uses deep neural networks, fog computing and cloud computing to develop a scalable and time-sensitive infrastructure that can detect two major violations: not wearing a mask and failing to maintain a minimum distance of 6 feet between employees in the office environment. The proposed framework is developed with the vision to integrate multiple machine learning applications and handle the computing infrastructures for pandemic applications. The proposed framework can be used by application developers for the rapid development of new applications based on their requirements without worrying about scheduling. The proposed framework is tested for two independent applications and performed better than the traditional cloud environment in terms of latency and response time. The work done in this paper tries to bridge the gap between machine learning applications and their computing infrastructure for COVID-19.}, } @article {pmid34175609, year = {2021}, author = {Elnashar, A and Zeng, H and Wu, B and Fenta, AA and Nabil, M and Duerler, R}, title = {Soil erosion assessment in the Blue Nile Basin driven by a novel RUSLE-GEE framework.}, journal = {The Science of the total environment}, volume = {793}, number = {}, pages = {148466}, doi = {10.1016/j.scitotenv.2021.148466}, pmid = {34175609}, issn = {1879-1026}, mesh = {*Conservation of Natural Resources ; Environmental Monitoring ; Geographic Information Systems ; Soil ; *Soil Erosion ; }, abstract = {Assessment of soil loss and understanding its major drivers are essential to implement targeted management interventions. We have proposed and developed a Revised Universal Soil Loss Equation framework fully implemented in the Google Earth Engine cloud platform (RUSLE-GEE) for high spatial resolution (90 m) soil erosion assessment. Using RUSLE-GEE, we analyzed the soil loss rate for different erosion levels, land cover types, and slopes in the Blue Nile Basin.
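The underlying model is the standard multiplicative soil-loss equation; the factor notation below follows the usual RUSLE literature.

```latex
% Revised Universal Soil Loss Equation (mean annual soil loss per unit area):
A = R \cdot K \cdot LS \cdot C \cdot P
% A: soil loss (t ha^{-1} yr^{-1});  R: rainfall erosivity;
% K: soil erodibility;  LS: slope length and steepness factor;
% C: cover management factor;  P: support practice factor.
```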
The results showed that the mean soil loss rate is 39.73, 57.98, and 6.40 t ha⁻¹ yr⁻¹ for the entire Blue Nile, Upper Blue Nile, and Lower Blue Nile Basins, respectively. Our results also indicated that soil protection measures should be implemented in approximately 27% of the Blue Nile Basin, as these areas face a moderate to high risk of erosion (>10 t ha⁻¹ yr⁻¹). In addition, downscaling the Tropical Rainfall Measuring Mission (TRMM) precipitation data from 25 km to 1 km spatial resolution significantly impacts rainfall erosivity and soil loss rate. In terms of soil erosion assessment, the study showed the rapid characterization of soil loss rates that could be used to prioritize erosion mitigation plans to support sustainable land resources and tackle land degradation in the Blue Nile Basin.}, } @article {pmid34172112, year = {2021}, author = {Karhade, DS and Roach, J and Shrestha, P and Simancas-Pallares, MA and Ginnis, J and Burk, ZJS and Ribeiro, AA and Cho, H and Wu, D and Divaris, K}, title = {An Automated Machine Learning Classifier for Early Childhood Caries.}, journal = {Pediatric dentistry}, volume = {43}, number = {3}, pages = {191-197}, pmid = {34172112}, issn = {1942-5473}, support = {R03 DE028983/DE/NIDCR NIH HHS/United States ; U01 DE025046/DE/NIDCR NIH HHS/United States ; }, mesh = {Child ; Child, Preschool ; *Dental Caries ; *Dental Caries Susceptibility ; Humans ; Machine Learning ; North Carolina ; Nutrition Surveys ; Prevalence ; }, abstract = {Purpose: The purpose of the study was to develop and evaluate an automated machine learning algorithm (AutoML) for children's classification according to early childhood caries (ECC) status. Methods: Clinical, demographic, behavioral, and parent-reported oral health status information for a sample of 6,404 three- to five-year-old children (mean age equals 54 months) participating in an epidemiologic study of early childhood oral health in North Carolina was used. ECC prevalence (decayed, missing, and filled primary teeth surfaces [dmfs] score greater than zero, using an International Caries Detection and Assessment System score greater than or equal to three caries lesion detection threshold) was 54 percent. Ten sets of ECC predictors were evaluated for ECC classification accuracy (i.e., area under the ROC curve [AUC], sensitivity [Se], and positive predictive value [PPV]) using an AutoML deployment on Google Cloud, followed by internal validation and external replication. Results: A parsimonious model including two terms (i.e., children's age and parent-reported child oral health status: excellent/very good/good/fair/poor) had the highest AUC (0.74), Se (0.67), and PPV (0.64) scores and similar performance using an external National Health and Nutrition Examination Survey (NHANES) dataset (AUC equals 0.80, Se equals 0.73, PPV equals 0.49). Contrarily, a comprehensive model with 12 variables covering demographics (e.g., race/ethnicity, parental education), oral health behaviors, fluoride exposure, and dental home had worse performance (AUC equals 0.66, Se equals 0.54, PPV equals 0.61). Conclusions: Parsimonious automated machine learning early childhood caries classifiers, including single-item self-reports, can be valuable for ECC screening.
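A hedged sketch of the kind of two-term classifier the entry reports (children's age plus a parent-reported rating), trained on synthetic data with scikit-learn rather than the AutoML deployment itself:

```python
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import train_test_split

rng = np.random.default_rng(0)
n = 2000
age_months = rng.uniform(36, 60, n)            # children's age in months
parent_rating = rng.integers(1, 6, n)          # 1=excellent .. 5=poor
# Synthetic outcome loosely tied to both predictors (illustrative only).
logit = 0.05 * (age_months - 48) + 0.6 * (parent_rating - 3)
ecc = rng.random(n) < 1 / (1 + np.exp(-logit))

X = np.column_stack([age_months, parent_rating])
X_tr, X_te, y_tr, y_te = train_test_split(X, ecc, random_state=0)
clf = LogisticRegression().fit(X_tr, y_tr)
print("AUC:", roc_auc_score(y_te, clf.predict_proba(X_te)[:, 1]))
```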
The classifier can accommodate biological information that can help improve its performance in the future.}, } @article {pmid34155435, year = {2021}, author = {El Motaki, S and Yahyaouy, A and Gualous, H and Sabor, J}, title = {A new weighted fuzzy C-means clustering for workload monitoring in cloud datacenter platforms.}, journal = {Cluster computing}, volume = {24}, number = {4}, pages = {3367-3379}, pmid = {34155435}, issn = {1386-7857}, abstract = {The rapid growth in virtualization solutions has driven the widespread adoption of cloud computing paradigms among various industries and applications. This has led to a growing need for XaaS solutions and equipment to enable teleworking. To meet this need, cloud operators and datacenters have to overtake several challenges related to continuity, the quality of services provided, data security, and anomaly detection issues. Mainly, anomaly detection methods play a critical role in detecting virtual machines' abnormal behaviours that can potentially violate service level agreements established with users. Unsupervised machine learning techniques are among the most commonly used technologies for implementing anomaly detection systems. This paper introduces a novel clustering approach for analyzing virtual machine behaviour while running workloads in a system based on resource usage details (such as CPU utilization and downtime events). The proposed algorithm is inspired by the intuitive mechanism of flocking birds in nature to form reasonable clusters. Each starling movement's direction depends on self-information and information provided by other close starlings during the flight. Analogically, after associating a weight with each data sample to guide the formation of meaningful groups, each data element determines its next position in the feature space based on its current position and surroundings. Based on a realistic dataset and clustering validity indices, the experimental evaluation shows that the new weighted fuzzy c-means algorithm provides interesting results and outperforms the corresponding standard algorithm (weighted fuzzy c-means).}, } @article {pmid34155424, year = {2021}, author = {Donato, L and Scimone, C and Rinaldi, C and D'Angelo, R and Sidoti, A}, title = {New evaluation methods of read mapping by 17 aligners on simulated and empirical NGS data: an updated comparison of DNA- and RNA-Seq data from Illumina and Ion Torrent technologies.}, journal = {Neural computing & applications}, volume = {33}, number = {22}, pages = {15669-15692}, pmid = {34155424}, issn = {0941-0643}, abstract = {UNLABELLED: During the last (15) years, improved omics sequencing technologies have expanded the scale and resolution of various biological applications, generating high-throughput datasets that require carefully chosen software tools to be processed. Therefore, following the sequencing development, bioinformatics researchers have been challenged to implement alignment algorithms for next-generation sequencing reads. However, nowadays selection of aligners based on genome characteristics is poorly studied, so our benchmarking study extended the "state of art" comparing 17 different aligners. The chosen tools were assessed on empirical human DNA- and RNA-Seq data, as well as on simulated datasets in human and mouse, evaluating a set of parameters previously not considered in such kind of benchmarks. As expected, we found that each tool was the best in specific conditions. 
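Returning to the weighted fuzzy c-means entry above: a NumPy sketch of the standard fuzzy c-means updates that the weighted variant modifies; this is our illustration, not the published algorithm:

```python
import numpy as np

def fcm(X, c=3, m=2.0, iters=100, seed=0):
    """Standard fuzzy c-means; the weighted variant discussed above adds
    per-sample weights to these updates (our sketch, not the paper's code)."""
    rng = np.random.default_rng(seed)
    U = rng.random((c, len(X)))
    U /= U.sum(axis=0)                       # memberships sum to 1 per sample
    for _ in range(iters):
        Um = U ** m
        centers = Um @ X / Um.sum(axis=1, keepdims=True)
        d = np.linalg.norm(X[None, :, :] - centers[:, None, :], axis=2) + 1e-9
        U = d ** (-2 / (m - 1))
        U /= U.sum(axis=0)
    return centers, U

# Toy usage on synthetic resource-usage samples (e.g., CPU%, downtime events).
data = np.random.default_rng(1).random((200, 2))
centers, memberships = fcm(data)
```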
For Ion Torrent single-end RNA-Seq samples, the most suitable aligners were CLC and BWA-MEM, which achieved the best results in terms of efficiency, accuracy, duplication rate, saturation profile and running time. For Illumina paired-end osteomyelitis transcriptomics data, instead, the best-performing algorithm, together with the already cited CLC, was Novoalign, which excelled in accuracy and saturation analyses. Segemehl and DNASTAR performed best on both DNA-Seq datasets, with Segemehl particularly suitable for exome data. In conclusion, our study could guide users in the selection of a suitable aligner based on genome and transcriptome characteristics. However, several other aspects that emerged from our work should be considered as the alignment research area evolves, such as the involvement of artificial intelligence to support cloud computing and mapping to multiple genomes.

SUPPLEMENTARY INFORMATION: The online version contains supplementary material available at 10.1007/s00521-021-06188-z.}, } @article {pmid34153189, year = {2021}, author = {Bichmann, L and Gupta, S and Rosenberger, G and Kuchenbecker, L and Sachsenberg, T and Ewels, P and Alka, O and Pfeuffer, J and Kohlbacher, O and Röst, H}, title = {DIAproteomics: A Multifunctional Data Analysis Pipeline for Data-Independent Acquisition Proteomics and Peptidomics.}, journal = {Journal of proteome research}, volume = {20}, number = {7}, pages = {3758-3766}, doi = {10.1021/acs.jproteome.1c00123}, pmid = {34153189}, issn = {1535-3907}, mesh = {*Data Analysis ; Mass Spectrometry ; *Proteomics ; Reproducibility of Results ; Software ; }, abstract = {Data-independent acquisition (DIA) is becoming a leading analysis method in biomedical mass spectrometry. The main advantages include greater reproducibility and sensitivity and a greater dynamic range compared with data-dependent acquisition (DDA). However, the data analysis is complex and often requires expert knowledge when dealing with large-scale data sets. Here we present DIAproteomics, a multifunctional, automated, high-throughput pipeline implemented in the Nextflow workflow management system that allows one to easily process proteomics and peptidomics DIA data sets on diverse compute infrastructures. The central components are well-established tools such as the OpenSwathWorkflow for the DIA spectral library search and PyProphet for the false discovery rate assessment. In addition, it provides options to generate spectral libraries from existing DDA data and to carry out the retention time and chromatogram alignment. The output includes annotated tables and diagnostic visualizations from the statistical postprocessing and computation of fold-changes across pairwise conditions, predefined in an experimental design. DIAproteomics is well documented open-source software and is available under a permissive license to the scientific community at https://www.openms.de/diaproteomics/.}, } @article {pmid34143822, year = {2021}, author = {Li, J and Peng, B and Wei, Y and Ye, H}, title = {Accurate extraction of surface water in complex environment based on Google Earth Engine and Sentinel-2.}, journal = {PloS one}, volume = {16}, number = {6}, pages = {e0253209}, pmid = {34143822}, issn = {1932-6203}, mesh = {Environmental Monitoring/*methods ; *Satellite Imagery ; Sri Lanka ; *Water ; *Water Resources ; }, abstract = {To realize the accurate extraction of surface water in complex environment, this study takes Sri Lanka as the study area owing to the complex geography and various types of water bodies. Based on Google Earth Engine and Sentinel-2 images, an automatic water extraction model in complex environment (AWECE) was developed. The accuracy of water extraction by AWECE, NDWI, MNDWI and the revised version of multi-spectral water index (MuWI-R) models was evaluated from visual interpretation and quantitative analysis. The results show that the AWECE model could significantly improve the accuracy of water extraction in complex environment, with an overall accuracy of 97.16%, and an extremely low omission error (0.74%) and commission error (2.35%). The AWECE model could effectively avoid the influence of cloud shadow, mountain shadow and paddy soil on water extraction accuracy.
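As a point of reference for the index comparison above, a short Earth Engine Python sketch of the baseline NDWI/MNDWI computation on Sentinel-2 bands; the collection filter values and threshold are illustrative, and AWECE itself is more involved:

```python
import ee

ee.Initialize()

# Median Sentinel-2 surface-reflectance composite (illustrative filter values).
s2 = (ee.ImageCollection("COPERNICUS/S2_SR")
      .filterDate("2020-01-01", "2020-12-31")
      .filter(ee.Filter.lt("CLOUDY_PIXEL_PERCENTAGE", 20))
      .median())

# Conventional indices the AWECE model is compared against:
ndwi = s2.normalizedDifference(["B3", "B8"])    # (green - NIR)/(green + NIR)
mndwi = s2.normalizedDifference(["B3", "B11"])  # (green - SWIR)/(green + SWIR)

water_mask = mndwi.gt(0)  # simple threshold baseline; AWECE goes further
```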
The model can be widely applied in cloudy, mountainous and other areas with complex environments, which has important practical significance for water resources investigation, monitoring and protection.}, } @article {pmid34141897, year = {2021}, author = {Azhir, E and Jafari Navimipour, N and Hosseinzadeh, M and Sharifi, A and Darwesh, A}, title = {A technique for parallel query optimization using MapReduce framework and a semantic-based clustering method.}, journal = {PeerJ. Computer science}, volume = {7}, number = {}, pages = {e580}, pmid = {34141897}, issn = {2376-5992}, abstract = {Query optimization is the process of identifying the best Query Execution Plan (QEP). The query optimizer produces a close to optimal QEP for the given queries based on the minimum resource usage. The problem is that for a given query, there are plenty of different equivalent execution plans, each with a corresponding execution cost. To produce an effective query plan thus requires examining a large number of alternative plans. Access plan recommendation is an alternative technique to database query optimization, which reuses the previously-generated QEPs to execute new queries. In this technique, the query optimizer uses clustering methods to identify groups of similar queries. However, clustering such large datasets is challenging for traditional clustering algorithms due to huge processing time. Numerous cloud-based platforms have been introduced that offer low-cost solutions for the processing of distributed queries such as Hadoop, Hive, Pig, etc. This paper has applied and tested a model for clustering variant sizes of large query datasets parallelly using MapReduce. The results demonstrate the effectiveness of the parallel implementation of query workloads clustering to achieve good scalability.}, } @article {pmid34136534, year = {2021}, author = {Inamura, T and Mizuchi, Y}, title = {SIGVerse: A Cloud-Based VR Platform for Research on Multimodal Human-Robot Interaction.}, journal = {Frontiers in robotics and AI}, volume = {8}, number = {}, pages = {549360}, pmid = {34136534}, issn = {2296-9144}, abstract = {Research on Human-Robot Interaction (HRI) requires the substantial consideration of an experimental design, as well as a significant amount of time to practice the subject experiment. Recent technology in virtual reality (VR) can potentially address these time and effort challenges. The significant advantages of VR systems for HRI are: 1) cost reduction, as experimental facilities are not required in a real environment; 2) provision of the same environmental and embodied interaction conditions to test subjects; 3) visualization of arbitrary information and situations that cannot occur in reality, such as playback of past experiences, and 4) ease of access to an immersive and natural interface for robot/avatar teleoperations. Although VR tools with their features have been applied and developed in previous HRI research, all-encompassing tools or frameworks remain unavailable. In particular, the benefits of integration with cloud computing have not been comprehensively considered. Hence, the purpose of this study is to propose a research platform that can comprehensively provide the elements required for HRI research by integrating VR and cloud technologies. To realize a flexible and reusable system, we developed a real-time bridging mechanism between the robot operating system (ROS) and Unity. 
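A minimal example of the ROS side of such a ROS-Unity bridge, using the standard rospy API; the node and topic names are hypothetical:

```python
import rospy
from std_msgs.msg import String

# Minimal ROS node whose messages a Unity-side bridge could subscribe to.
# The topic name "/sigverse/avatar_cmd" is a hypothetical example.
rospy.init_node("unity_bridge_demo")
pub = rospy.Publisher("/sigverse/avatar_cmd", String, queue_size=10)

rate = rospy.Rate(10)  # publish at 10 Hz
while not rospy.is_shutdown():
    pub.publish(String(data="wave_hand"))
    rate.sleep()
```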
To confirm the feasibility of the system in a practical HRI scenario, we applied the proposed system to three case studies, including a robot competition named RoboCup@Home. Via these case studies, we validated the system's usefulness and its potential for the development and evaluation of social intelligence via multimodal HRI.}, } @article {pmid34136134, year = {2021}, author = {Paul-Gilloteaux, P and Tosi, S and Hériché, JK and Gaignard, A and Ménager, H and Marée, R and Baecker, V and Klemm, A and Kalaš, M and Zhang, C and Miura, K and Colombelli, J}, title = {Bioimage analysis workflows: community resources to navigate through a complex ecosystem.}, journal = {F1000Research}, volume = {10}, number = {}, pages = {320}, pmid = {34136134}, issn = {2046-1402}, mesh = {Algorithms ; *Computational Biology ; *Ecosystem ; Information Storage and Retrieval ; Workflow ; }, abstract = {Workflows are the keystone of bioimage analysis, and the NEUBIAS (Network of European BioImage AnalystS) community is trying to gather the actors of this field and organize the information around them. One of its most recent outputs is the opening of the F1000Research NEUBIAS gateway, whose main objective is to offer a channel of publication for bioimage analysis workflows and associated resources. In this paper we want to express some personal opinions and recommendations related to finding, handling and developing bioimage analysis workflows. The emergence of "big data" in bioimaging and resource-intensive analysis algorithms make local data storage and computing solutions a limiting factor. At the same time, the need for data sharing with collaborators and a general shift towards remote work have created new challenges and avenues for the execution and sharing of bioimage analysis workflows. These challenges are to reproducibly run workflows in remote environments, in particular when their components come from different software packages, but also to document them and link their parameters and results by following the FAIR principles (Findable, Accessible, Interoperable, Reusable) to foster open and reproducible science. In this opinion paper, we focus on giving some directions to the reader to tackle these challenges and navigate through this complex ecosystem, in order to find and use workflows, and to compare workflows addressing the same problem. We also discuss tools to run workflows in the cloud and on High Performance Computing resources, and suggest ways to make these workflows FAIR.}, } @article {pmid34127909, year = {2023}, author = {Tan, C and Lin, J}, title = {A new QoE-based prediction model for evaluating virtual education systems with COVID-19 side effects using data mining.}, journal = {Soft computing}, volume = {27}, number = {3}, pages = {1699-1713}, pmid = {34127909}, issn = {1432-7643}, abstract = {Today, emerging technologies such as 5G Internet of things (IoT), virtual reality and cloud-edge computing have enhanced and upgraded higher education environments in universities, colleges and research centers. Computer-assisted learning systems with aggregating IoT applications and smart devices have improved the e-learning systems by enabling remote monitoring and screening of the behavioral aspects of teaching and education scores of students. On the other side, educational data mining has improved the higher education systems by predicting and analyzing the behavioral aspects of teaching and education scores of students.
Due to an unexpected and huge increase in the number of patients during the coronavirus (COVID-19) pandemic, all universities, campuses, schools, research centers, many scientific collaborations and meetings have closed and been forced to initiate online teaching, e-learning and virtual meetings. Due to the importance of behavioral aspects of teaching and education between lecturers and students, prediction of quality of experience (QoE) in virtual education systems is a critical issue. This paper presents a new prediction model to detect technical aspects of teaching and e-learning in virtual education systems using data mining. Association rules mining and supervised techniques are applied to detect efficient QoE factors on virtual education systems. The experimental results showed that the suggested prediction model meets the proper accuracy, precision and recall factors for predicting the behavioral aspects of teaching and e-learning for students in virtual education systems.}, } @article {pmid34126874, year = {2021}, author = {Abbasi, WA and Abbas, SA and Andleeb, S}, title = {PANDA: Predicting the change in proteins binding affinity upon mutations by finding a signal in primary structures.}, journal = {Journal of bioinformatics and computational biology}, volume = {19}, number = {4}, pages = {2150015}, doi = {10.1142/S0219720021500153}, pmid = {34126874}, issn = {1757-6334}, mesh = {Amino Acid Sequence ; *Machine Learning ; Mutation ; Protein Binding ; *Proteins/genetics/metabolism ; }, abstract = {Accurately determining a change in protein binding affinity upon mutations is important to find novel therapeutics and to assist mutagenesis studies. Determination of change in binding affinity upon mutations requires sophisticated, expensive, and time-consuming wet-lab experiments that can be supported with computational methods. Most of the available computational prediction techniques depend upon protein structures, which limits their applicability to protein complexes with known 3D structures. In this work, we explore the sequence-based prediction of change in protein binding affinity upon mutation and question the effectiveness of k-fold cross-validation (CV) across mutations adopted in previous studies to assess the generalization ability of such predictors with no known mutation during training. We have used protein sequence information instead of protein structures along with machine learning techniques to accurately predict the change in protein binding affinity upon mutation. Our proposed sequence-based novel change in protein binding affinity predictor called PANDA performs comparably to the existing methods gauged through an appropriate CV scheme and an external independent test dataset. On an external test dataset, our proposed method gives a maximum Pearson correlation coefficient of 0.52 in comparison to the state-of-the-art existing protein structure-based method called MutaBind which gives a maximum Pearson correlation coefficient of 0.59. Our proposed protein sequence-based method, to predict a change in binding affinity upon mutations, has wide applicability and comparable performance in comparison to existing protein structure-based methods.
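A hedged sketch of sequence-only mutation features of the general kind PANDA relies on, here a simple composition difference fed to a regressor on synthetic data; the encoding and labels are our assumptions, not the published feature set:

```python
import numpy as np
from sklearn.ensemble import RandomForestRegressor

AA = "ACDEFGHIKLMNPQRSTVWY"

def composition(seq):
    """20-dim amino-acid composition of a protein sequence."""
    counts = np.array([seq.count(a) for a in AA], dtype=float)
    return counts / max(len(seq), 1)

def mutation_features(wild, mutant):
    # Difference of compositions: a simple sequence-only encoding of a
    # mutation (illustrative; PANDA's actual features differ).
    return composition(mutant) - composition(wild)

# Synthetic training pairs: (wild, mutant) sequences with placeholder labels.
rng = np.random.default_rng(0)
seqs = ["".join(rng.choice(list(AA), 80)) for _ in range(200)]
X, y = [], []
for s in seqs:
    pos = rng.integers(len(s))
    mut = s[:pos] + rng.choice(list(AA)) + s[pos + 1:]
    X.append(mutation_features(s, mut))
    y.append(rng.normal())          # placeholder ddG values, not real data
model = RandomForestRegressor(n_estimators=100).fit(X, y)
```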
We made PANDA easily accessible through a cloud-based webserver and python code available at https://sites.google.com/view/wajidarshad/software and https://github.com/wajidarshad/panda, respectively.}, } @article {pmid34121460, year = {2021}, author = {Laske, TG and Garshelis, DL and Iles, TL and Iaizzo, PA}, title = {An engineering perspective on the development and evolution of implantable cardiac monitors in free-living animals.}, journal = {Philosophical transactions of the Royal Society of London. Series B, Biological sciences}, volume = {376}, number = {1830}, pages = {20200217}, pmid = {34121460}, issn = {1471-2970}, mesh = {Animals ; Caniformia/*physiology ; Engineering/*instrumentation ; Heart Function Tests/instrumentation/*veterinary ; Monitoring, Ambulatory/instrumentation/veterinary ; Physiology/*instrumentation ; }, abstract = {The latest technologies associated with implantable physiological monitoring devices can record multiple channels of data (including: heart rates and rhythms, activity, temperature, impedance and posture), and coupled with powerful software applications, have provided novel insights into the physiology of animals in the wild. This perspective details past challenges and lessons learned from the uses and developments of implanted biologgers designed for human clinical application in our research on free-ranging American black bears (Ursus americanus). In addition, we reference other research by colleagues and collaborators who have leveraged these devices in their work, including: brown bears (Ursus arctos), grey wolves (Canis lupus), moose (Alces alces), maned wolves (Chrysocyon brachyurus) and southern elephant seals (Mirounga leonina). We also discuss the potentials for applications of such devices across a range of other species. To date, the devices described have been used in fifteen different wild species, with publications pending in many instances. We have focused our physiological research on the analyses of heart rates and rhythms and thus special attention will be paid to this topic. We then discuss some major expected step changes such as improvements in sensing algorithms, data storage, and the incorporation of next-generation short-range wireless telemetry. The latter provides new avenues for data transfer, and when combined with cloud-based computing, it not only provides means for big data storage but also the ability to readily leverage high-performance computing platforms using artificial intelligence and machine learning algorithms. These advances will dramatically increase both data quantity and quality and will facilitate the development of automated recognition of extreme physiological events or key behaviours of interest in a broad array of environments, thus further aiding wildlife monitoring and management. This article is part of the theme issue 'Measuring physiology in free-living animals (Part I)'.}, } @article {pmid34109196, year = {2021}, author = {Macdonald, JC and Isom, DC and Evans, DD and Page, KJ}, title = {Digital Innovation in Medicinal Product Regulatory Submission, Review, and Approvals to Create a Dynamic Regulatory Ecosystem-Are We Ready for a Revolution?.}, journal = {Frontiers in medicine}, volume = {8}, number = {}, pages = {660808}, pmid = {34109196}, issn = {2296-858X}, abstract = {The pace of scientific progress over the past several decades within the biological, drug development, and the digital realm has been remarkable. 
The 'omics revolution has enabled a better understanding of the biological basis of disease, unlocking the possibility of new products such as gene and cell therapies which offer novel patient centric solutions. Innovative approaches to clinical trial designs promise greater efficiency, and in recent years, scientific collaborations and consortia have been developing novel approaches to leverage new sources of evidence such as real-world data, patient experience data, and biomarker data. Alongside this, there have been great strides in digital innovation. Cloud computing has become mainstream and the internet of things and blockchain technology have become a reality. These examples of transformation stand in sharp contrast to the current inefficient approach for regulatory submission, review, and approval of medicinal products. This process has not fundamentally changed since the beginning of medicine regulation in the late 1960s. Fortunately, progressive initiatives are emerging that will enrich and streamline regulatory decision making and deliver patient centric therapies, if they are successful in transforming the current transactional construct and harnessing scientific and technological advances. Such a radical transformation will not be simple for both regulatory authorities and company sponsors, nor will progress be linear. We examine the shortcomings of the current system with its entrenched and variable business processes, offer examples of progress as catalysts for change, and make the case for a new cloud-based model. To optimize navigation toward this reality we identify implications and regulatory design questions which must be addressed. We conclude that a new model is possible and is slowly emerging through cumulative change initiatives that question, challenge, and redesign best practices, roles, and responsibilities, and that this must be combined with adaptation of behaviors and acquisition of new skills.}, } @article {pmid34101767, year = {2021}, author = {Abdel-Kader, RF and El-Sayad, NE and Rizk, RY}, title = {Efficient energy and completion time for dependent task computation offloading algorithm in industry 4.0.}, journal = {PloS one}, volume = {16}, number = {6}, pages = {e0252756}, pmid = {34101767}, issn = {1932-6203}, mesh = {Cloud Computing ; Industry/instrumentation/*methods ; *Internet of Things ; *Software ; }, abstract = {Rapid technological development has revolutionized the industrial sector. Internet of Things (IoT) started to appear in many fields, such as health care and smart cities. A few years later, IoT was supported by industry, leading to what is called Industry 4.0. In this paper, a cloud-assisted fog-networking architecture is implemented in an IoT environment with a three-layer network. An efficient energy and completion time for dependent task computation offloading (ET-DTCO) algorithm is proposed, and it considers two quality-of-service (QoS) parameters: efficient energy and completion time offloading for dependent tasks in Industry 4.0. The proposed solution employs the Firefly algorithm to optimize the process of the selection-offloading computing mode and determine the optimal solution for performing tasks locally or offloaded to a fog or cloud considering the task dependency. Moreover, the proposed algorithm is compared with existing techniques.
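For the Firefly step mentioned above, a generic NumPy implementation of the classic attractiveness-and-move update; the objective is a toy stand-in for a weighted energy/completion-time cost:

```python
import numpy as np

def firefly_minimize(cost, dim, n=25, iters=100, beta0=1.0, gamma=1.0,
                     alpha=0.1, seed=0):
    """Generic firefly algorithm; an offloading use would encode task
    placements in the position vector and a weighted energy/latency
    objective in `cost`. This toy version just minimizes `cost`."""
    rng = np.random.default_rng(seed)
    pos = rng.uniform(0, 1, (n, dim))
    for _ in range(iters):
        vals = np.array([cost(p) for p in pos])
        for i in range(n):
            for j in range(n):
                if vals[j] < vals[i]:          # j is brighter: move i toward j
                    r2 = np.sum((pos[i] - pos[j]) ** 2)
                    beta = beta0 * np.exp(-gamma * r2)
                    pos[i] = (pos[i] + beta * (pos[j] - pos[i])
                              + alpha * (rng.random(dim) - 0.5))
    best = min(pos, key=cost)
    return best, cost(best)

# Toy stand-in for a weighted energy/completion-time objective.
best, val = firefly_minimize(lambda p: np.sum(p ** 2), dim=4)
print(best, val)
```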
Simulation results proved that the proposed ET-DTCO algorithm outperforms other offloading algorithms in minimizing energy consumption and completion time while enhancing the overall efficiency of the system.}, } @article {pmid34095775, year = {2021}, author = {O'Grady, N and Gibbs, DL and Abdilleh, K and Asare, A and Asare, S and Venters, S and Brown-Swigart, L and Hirst, GL and Wolf, D and Yau, C and van 't Veer, LJ and Esserman, L and Basu, A}, title = {PRoBE the cloud toolkit: finding the best biomarkers of drug response within a breast cancer clinical trial.}, journal = {JAMIA open}, volume = {4}, number = {2}, pages = {ooab038}, pmid = {34095775}, issn = {2574-2531}, abstract = {OBJECTIVES: In this paper, we discuss leveraging cloud-based platforms to collect, visualize, analyze, and share data in the context of a clinical trial. Our cloud-based infrastructure, Patient Repository of Biomolecular Entities (PRoBE), has given us the opportunity for uniform data structure, more efficient analysis of valuable data, and increased collaboration between researchers.

MATERIALS AND METHODS: We utilize a multi-cloud platform to manage and analyze data generated from the clinical Investigation of Serial Studies to Predict Your Therapeutic Response with Imaging And moLecular Analysis 2 (I-SPY 2 TRIAL). A collaboration with the Institute for Systems Biology Cancer Gateway in the Cloud has additionally given us access to public genomic databases. Applications to I-SPY 2 data have been built using R Shiny, while leveraging Google's BigQuery tables and SQL commands for data mining.
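A minimal sketch of the BigQuery access pattern the methods describe, using the official Python client; the project, dataset, table, and column names are hypothetical placeholders, not the I-SPY 2 schema:

```python
from google.cloud import bigquery

client = bigquery.Client(project="my-trial-project")  # hypothetical project

# Table and column names below are illustrative placeholders.
query = """
    SELECT patient_id, biomarker, value
    FROM `my-trial-project.trial_data.biomarkers`
    WHERE arm = @arm
"""
job_config = bigquery.QueryJobConfig(
    query_parameters=[bigquery.ScalarQueryParameter("arm", "STRING", "ARM_A")])
for row in client.query(query, job_config=job_config).result():
    print(row.patient_id, row.biomarker, row.value)
```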

RESULTS: We highlight the implementation of PRoBE in several unique case studies including prediction of biomarkers associated with clinical response, access to the Pan-Cancer Atlas, and integrating pathology images within the cloud. Our data integration pipelines, documentation, and all codebase will be placed in a Github repository.

DISCUSSION AND CONCLUSION: We are hoping to develop risk stratification diagnostics by integrating additional molecular, magnetic resonance imaging, and pathology markers into PRoBE to better predict drug response. A robust cloud infrastructure and tool set can help integrate these large datasets to make valuable predictions of response to multiple agents. For that reason, we are continuously improving PRoBE to advance the way data is stored, accessed, and analyzed in the I-SPY 2 clinical trial.}, } @article {pmid34095236, year = {2021}, author = {Kapitonov, A and Lonshakov, S and Bulatov, V and Montazam, BK and White, J}, title = {Robot-as-a-Service: From Cloud to Peering Technologies.}, journal = {Frontiers in robotics and AI}, volume = {8}, number = {}, pages = {560829}, pmid = {34095236}, issn = {2296-9144}, abstract = {This article is devoted to the historical overview of the Robot-as-a-Service concept. Several major scientific publications on the development of Robot-as-a-Service systems based on a service-oriented paradigm are considered. Much attention is paid to the analysis of a centralized approach in the development using cloud computing services and the search for the limitations of this approach. As a result, general conclusions on the reviewed publications are given, as well as the authors' own vision of Robot-as-a-Service systems based on the concept of robot economics.}, } @article {pmid34092679, year = {2021}, author = {Li, F and Shankar, A and Santhosh Kumar, B}, title = {Fog-Internet of things-assisted multi-sensor intelligent monitoring model to analyze the physical health condition.}, journal = {Technology and health care : official journal of the European Society for Engineering and Medicine}, volume = {29}, number = {6}, pages = {1319-1337}, doi = {10.3233/THC-213009}, pmid = {34092679}, issn = {1878-7401}, mesh = {Cloud Computing ; Delivery of Health Care ; Humans ; *Internet of Things ; *Telemedicine ; *Wearable Electronic Devices ; }, abstract = {BACKGROUND: Internet of Things (IoT) technology provides a tremendous and structured solution to tackle service deliverance aspects of healthcare in terms of mobile health and remote patient tracking. In medicine observation applications, IoT and cloud computing serves as an assistant in the health sector and plays an incredibly significant role. Health professionals and technicians have built an excellent platform for people with various illnesses, leveraging principles of wearable technology, wireless channels, and other remote devices for low-cost healthcare monitoring.

OBJECTIVE: This paper proposed the Fog-IoT-assisted multisensor intelligent monitoring model (FIoT-MIMM) for analyzing the patient's physical health condition.

METHOD: The proposed system uses a multisensor device for collecting biometric and medical observing data. The main point is to continually generate emergency alerts on mobile phones from the fog system to users. For the precautionary steps and suggestions for patients' health, a fog layer's temporal information is used.
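A small sketch of how a fog node might raise the emergency alerts described above; the vital-sign thresholds and field names are assumptions, not the FIoT-MIMM specification:

```python
# Illustrative fog-layer rule: thresholds and field names are assumptions.
VITAL_LIMITS = {"heart_rate": (50, 120), "spo2": (92, 100), "temp_c": (35.0, 38.0)}

def check_vitals(sample):
    """Return a list of alert strings for out-of-range readings."""
    alerts = []
    for key, (lo, hi) in VITAL_LIMITS.items():
        v = sample.get(key)
        if v is not None and not lo <= v <= hi:
            alerts.append(f"ALERT: {key}={v} outside [{lo}, {hi}]")
    return alerts

print(check_vitals({"heart_rate": 135, "spo2": 96, "temp_c": 36.6}))
```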

RESULTS: Experimental findings show that the proposed FIoT-MIMM model has a shorter response time and higher accuracy in determining a patient's condition than other existing methods. Furthermore, decision making based on real-time healthcare information further improves the utility of the suggested model.}, } @article {pmid34092673, year = {2021}, author = {Cui, M and Baek, SS and Crespo, RG and Premalatha, R}, title = {Internet of things-based cloud computing platform for analyzing the physical health condition.}, journal = {Technology and health care : official journal of the European Society for Engineering and Medicine}, volume = {29}, number = {6}, pages = {1233-1247}, doi = {10.3233/THC-213003}, pmid = {34092673}, issn = {1878-7401}, mesh = {*Cloud Computing ; Computer Systems ; Delivery of Health Care ; Humans ; *Internet of Things ; Models, Theoretical ; *Physical Fitness ; }, abstract = {BACKGROUND: Health monitoring is important for early disease diagnosis and can reduce discomfort and treatment expenses, which is highly relevant for prevention. Early diagnosis and treatment of multiple conditions can radically improve patient healthcare. A concept model for a real-time patient tracking system is the primary goal of this method. The Internet of things (IoT) has made health systems accessible for programs based on the value of patient health.

OBJECTIVE: In this paper, the IoT-based cloud computing for patient health monitoring framework (IoT-CCPHM), has been proposed for effective monitoring of the patients.

METHOD: The emerging connected sensors and IoT devices monitor and test the cardiac speed, oxygen saturation percentage, body temperature, and patient's eye movement. The collected data are used in the cloud database to evaluate the patient's health, and the effects of all measures are stored. The IoT-CCPHM maintains that the medical record is processed in the cloud servers.

RESULTS: The experimental results show that patient health monitoring is a reliable way to improve health effectively.}, } @article {pmid34086595, year = {2021}, author = {Weinstein, RS and Holcomb, MJ and Mo, J and Yonsetto, P and Bojorquez, O and Grant, M and Wendel, CS and Tallman, NJ and Ercolano, E and Cidav, Z and Hornbrook, MC and Sun, V and McCorkle, R and Krouse, RS}, title = {An Ostomy Self-management Telehealth Intervention for Cancer Survivors: Technology-Related Findings From a Randomized Controlled Trial.}, journal = {Journal of medical Internet research}, volume = {23}, number = {9}, pages = {e26545}, pmid = {34086595}, issn = {1438-8871}, mesh = {*Cancer Survivors ; Humans ; *Neoplasms ; *Ostomy ; *Self-Management ; Technology ; *Telemedicine ; }, abstract = {BACKGROUND: An Ostomy Self-management Telehealth (OSMT) intervention by nurse educators and peer ostomates can equip new ostomates with critical knowledge regarding ostomy care. A telehealth technology assessment aim was to measure telehealth engineer support requirements for telehealth technology-related (TTR) incidents encountered during OSMT intervention sessions held via a secure cloud-based videoconferencing service, Zoom for Healthcare.

OBJECTIVE: This paper examines technology-related challenges, issues, and opportunities encountered in the use of telehealth in a randomized controlled trial intervention for cancer survivors living with a permanent ostomy.

METHODS: The Arizona Telemedicine Program provided telehealth engineering support for 105 OSMT sessions, scheduled for 90 to 120 minutes each, over a 2-year period. The OSMT groups included up to 15 participants, comprising 4-6 ostomates, 4-6 peer ostomates, 2 nurse educators, and 1 telehealth engineer. OSMT-session TTR incidents were recorded contemporaneously in detailed notes by the research staff. TTR incidents were categorized and tallied.

RESULTS: A total of 97.1% (102/105) of OSMT sessions were completed as scheduled. In total, 3 OSMT sessions were not held owing to non-technology-related reasons. Of the 93 ostomates who participated in OSMT sessions, 80 (86%) completed their OSMT curriculum. TTR incidents occurred in 36.3% (37/102) of the completed sessions with varying disruptive impacts. No sessions were canceled or rescheduled because of TTR incidents. Disruptions from TTR incidents were minimized by following the TTR incident prevention and incident response plans.

CONCLUSIONS: Telehealth videoconferencing technology can enable ostomates to participate in ostomy self-management education by incorporating dedicated telehealth engineering support. Potentially, OSMT greatly expands the availability of ostomy self-management education for new ostomates.

TRIAL REGISTRATION: ClinicalTrials.gov NCT02974634; https://clinicaltrials.gov/ct2/show/NCT02974634.}, } @article {pmid34086476, year = {2021}, author = {Lin, Z and Zou, J and Liu, S and Peng, C and Li, Z and Wan, X and Fang, D and Yin, J and Gobbo, G and Chen, Y and Ma, J and Wen, S and Zhang, P and Yang, M}, title = {A Cloud Computing Platform for Scalable Relative and Absolute Binding Free Energy Predictions: New Opportunities and Challenges for Drug Discovery.}, journal = {Journal of chemical information and modeling}, volume = {61}, number = {6}, pages = {2720-2732}, doi = {10.1021/acs.jcim.0c01329}, pmid = {34086476}, issn = {1549-960X}, mesh = {Artificial Intelligence ; *Cloud Computing ; *Drug Discovery ; Entropy ; Thermodynamics ; }, abstract = {Free energy perturbation (FEP) has become widely used in drug discovery programs for binding affinity prediction between candidate compounds and their biological targets. However, limitations of FEP applications also exist, including, but not limited to, high cost, long waiting time, limited scalability, and breadth of application scenarios. To overcome these problems, we have developed XFEP, a scalable cloud computing platform for both relative and absolute free energy predictions using optimized simulation protocols. XFEP enables large-scale FEP calculations in a more efficient, scalable, and affordable way, for example, the evaluation of 5000 compounds can be performed in 1 week using 50-100 GPUs with a computing cost roughly equivalent to the cost for the synthesis of only one new compound. By combining these capabilities with artificial intelligence techniques for goal-directed molecule generation and evaluation, new opportunities can be explored for FEP applications in the drug discovery stages of hit identification, hit-to-lead, and lead optimization based not only on structure exploitation within the given chemical series but also including evaluation and comparison of completely unrelated molecules during structure exploration in a larger chemical space. XFEP provides the basis for scalable FEP applications to become more widely used in drug discovery projects and to speed up the drug discovery process from hit identification to preclinical candidate compound nomination.}, } @article {pmid34084936, year = {2021}, author = {Heidari, A and Jafari Navimipour, N}, title = {A new SLA-aware method for discovering the cloud services using an improved nature-inspired optimization algorithm.}, journal = {PeerJ. Computer science}, volume = {7}, number = {}, pages = {e539}, pmid = {34084936}, issn = {2376-5992}, abstract = {Cloud computing is one of the most important computing patterns that use a pay-as-you-go manner to process data and execute applications. Therefore, numerous enterprises are migrating their applications to cloud environments. Not only do intensive applications deal with enormous quantities of data, but they also demonstrate compute-intensive properties very frequently. The dynamicity, coupled with the ambiguity between marketed resources and resource requirement queries from users, remains important issues that hamper efficient discovery in a cloud environment. Cloud service discovery becomes a complex problem because of the increase in network size and complexity. Complexity and network size keep increasing dynamically, making it a complex NP-hard problem that requires effective service discovery approaches. 
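As background to the free energy perturbation platform above, the one-sided FEP (Zwanzig) identity, ΔG = -kT ln⟨exp(-ΔU/kT)⟩, can be sketched in a few lines of NumPy; the energy differences below are synthetic, not simulation output:

```python
import numpy as np

def zwanzig_delta_g(delta_u, kT=0.593):   # kT in kcal/mol at ~298 K
    """One-sided FEP (Zwanzig): dG = -kT * ln <exp(-dU/kT)> over the
    reference-state ensemble."""
    return -kT * np.log(np.mean(np.exp(-np.asarray(delta_u) / kT)))

# Synthetic per-frame energy differences between adjacent lambda windows.
rng = np.random.default_rng(0)
dU = rng.normal(0.5, 0.3, size=5000)      # kcal/mol, illustrative only
print(f"dG = {zwanzig_delta_g(dU):.3f} kcal/mol")
```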
One of the most famous cloud service discovery methods is the Ant Colony Optimization (ACO) algorithm; however, it suffers from a load balancing problem among the discovered nodes. If the workload balance is inefficient, it limits the use of resources. This paper solves this problem by applying an Inverted Ant Colony Optimization (IACO) algorithm for load-aware service discovery in cloud computing. The IACO considers the pheromones' repulsion instead of attraction. We design a model for service discovery in the cloud environment to overcome the traditional shortcomings. Numerical results demonstrate that the proposed mechanism can obtain an efficient service discovery method. The algorithm is simulated using the CloudSim simulator, and the results show better performance. Reduced energy consumption, shorter response times, and fewer Service Level Agreement (SLA) violations in cloud environments are the advantages of the proposed method.}, } @article {pmid34084926, year = {2021}, author = {Ali, A and Ahmed, M and Khan, A and Anjum, A and Ilyas, M and Helfert, M}, title = {VisTAS: blockchain-based visible and trusted remote authentication system.}, journal = {PeerJ. Computer science}, volume = {7}, number = {}, pages = {e516}, pmid = {34084926}, issn = {2376-5992}, abstract = {The information security domain focuses on security needs at all levels in a computing environment in either the Internet of Things, Cloud Computing, Cloud of Things, or any other implementation. Data, devices, services, or applications and communication are required to be protected and provided by information security shields at all levels and in all working states. Remote authentication is required to perform different administrative operations in an information system, and administrators have full access to the system and may pose insider threats. Superusers and administrators are the most trusted persons in an organisation. "Trust but verify" is an approach to have an eye on the superusers and administrators. Distributed ledger technology (blockchain-based data storage) is an immutable data storage scheme and provides a built-in facility to share statistics among peers. Distributed ledgers are proposed to provide visible security and non-repudiation, which securely record administrators' authentication requests. The presence of security, privacy, and accountability measures establishes trust among its stakeholders. Securing information in an electronic data processing system is challenging, i.e., providing services and access control for the resources to only legitimate users. Authentication plays a vital role in systems' security; therefore, authentication and identity management are the key subjects to provide information security services. The leading cause of information security breaches is the failure of identity management/authentication systems and insider threats. In this regard, visible security measures have more deterrence than other schemes. In this paper, an authentication scheme, "VisTAS," has been introduced, which provides visible security and trusted authentication services to the tenants and keeps the records in the blockchain.}, } @article {pmid34084925, year = {2021}, author = {Cambronero, ME and Bernal, A and Valero, V and Cañizares, PC and Núñez, A}, title = {Profiling SLAs for cloud system infrastructures and user interactions.}, journal = {PeerJ.
Computer science}, volume = {7}, number = {}, pages = {e513}, pmid = {34084925}, issn = {2376-5992}, abstract = {Cloud computing has emerged as a cutting-edge technology which is widely used by both private and public institutions, since it eliminates the capital expense of buying, maintaining, and setting up both hardware and software. Clients pay for the services they use, under the so-called Service Level Agreements (SLAs), which are the contracts that establish the terms and costs of the services. In this paper, we propose the CloudCost UML profile, which allows the modeling of cloud architectures and the users' behavior when they interact with the cloud to request resources. We then investigate how to increase the profits of cloud infrastructures by using price schemes. For this purpose, we distinguish between two types of users in the SLAs: regular and high-priority users. Regular users do not require a continuous service, so they can wait to be attended to. In contrast, high-priority users require a constant and immediate service, so they pay a greater price for their services. In addition, a computer-aided design tool, called MSCC (Modeling SLAs Cost Cloud), has been implemented to support the CloudCost profile, which enables the creation of specific cloud scenarios, as well as their edition and validation. Finally, we present a complete case study to illustrate the applicability of the CloudCost profile, thus making it possible to draw conclusions about how to increase the profits of the cloud infrastructures studied by adjusting the different cloud parameters and the resource configuration.}, } @article {pmid34073726, year = {2021}, author = {Tropea, M and De Rango, F and Nevigato, N and Bitonti, L and Pupo, F}, title = {SCARE: A Novel Switching and Collision Avoidance pRocEss for Connected Vehicles Using Virtualization and Edge Computing Paradigm.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {11}, pages = {}, pmid = {34073726}, issn = {1424-8220}, abstract = {In this paper, some collision avoidance systems based on MEC in a VANET environment are proposed and investigated. Micro services at edge are considered to support service continuity in vehicle communication and advertising. This considered system makes use of cloud and edge computing, allowing to switch communication from edge to cloud server and vice versa when possible, trying to guarantee the required constraints and balancing the communication among the servers. Simulation results were used to evaluate the performance of three considered mechanisms: the first one considering only edge with load balancing, the second one using edge/cloud switching and the third one using edge with load balancing and collision avoidance advertising.}, } @article {pmid34072637, year = {2021}, author = {Li, DC and Huang, CT and Tseng, CW and Chou, LD}, title = {Fuzzy-Based Microservice Resource Management Platform for Edge Computing in the Internet of Things.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {11}, pages = {}, pmid = {34072637}, issn = {1424-8220}, support = {108-2221-E-008-033-MY3 and 105-2221-E-008-071-MY.//Ministry of Science and Technology, Taiwan/ ; }, abstract = {Edge computing exhibits the advantages of real-time operation, low latency, and low network cost. It has become a key technology for realizing smart Internet of Things applications. 
Microservices are being used by an increasing number of edge computing networks because of their sufficiently small code, reduced program complexity, and flexible deployment. However, edge computing has more limited resources than cloud computing, and thus edge computing networks have higher requirements for the overall resource scheduling of running microservices. Accordingly, the resource management of microservice applications in edge computing networks is a crucial issue. In this study, we developed and implemented a microservice resource management platform for edge computing networks. We designed a fuzzy-based microservice computing resource scaling (FMCRS) algorithm that can dynamically control the resource expansion scale of microservices. We proposed and implemented two microservice resource expansion methods based on the resource usage of edge network computing nodes. We conducted the experimental analysis in six scenarios and the experimental results proved that the designed microservice resource management platform can reduce the response time for microservice resource adjustments and dynamically expand microservices horizontally and vertically. Compared with other state-of-the-art microservice resource management methods, FMCRS can reduce sudden surges in overall network resource allocation, and thus, it is more suitable for the edge computing microservice management environment.}, } @article {pmid34072301, year = {2021}, author = {Botez, R and Costa-Requena, J and Ivanciu, IA and Strautiu, V and Dobrota, V}, title = {SDN-Based Network Slicing Mechanism for a Scalable 4G/5G Core Network: A Kubernetes Approach.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {11}, pages = {}, pmid = {34072301}, issn = {1424-8220}, abstract = {Managing the large volumes of IoT and M2M traffic requires the evaluation of the scalability and reliability for all the components in the end-to-end system. This includes connectivity, mobile network functions, and application or services receiving and processing the data from end devices. Firstly, this paper discusses the design of a containerized IoT and M2M application and the mechanisms for delivering automated scalability and high availability when deploying it in: (1) the edge using balenaCloud; (2) the Amazon Web Services cloud with EC2 instances; and (3) the dedicated Amazon Web Services IoT service. The experiments showed that there are no significant differences between edge and cloud deployments regarding resource consumption. Secondly, the solutions for scaling the 4G/5G network functions and mobile backhaul that provide the connectivity between devices and IoT/M2M applications are analyzed. In this case, the scalability and high availability of the 4G/5G components are provided by Kubernetes. 
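Relatedly, the fuzzy-based scaling idea in the FMCRS entry above can be illustrated with a tiny rule-based controller in plain Python; the membership breakpoints and output mapping are assumptions, not the published FMCRS rules:

```python
def tri(x, a, b, c):
    """Triangular membership function."""
    if x <= a or x >= c:
        return 0.0
    return (x - a) / (b - a) if x <= b else (c - x) / (c - b)

def scale_decision(cpu_pct, latency_ms):
    """Fuzzy rules mapped to a replica delta; breakpoints are illustrative."""
    high_cpu = tri(cpu_pct, 50, 80, 110)
    low_cpu = tri(cpu_pct, -10, 20, 50)
    high_lat = tri(latency_ms, 100, 250, 400)
    scale_up = max(high_cpu, high_lat)         # rule: high CPU OR high latency
    scale_down = min(low_cpu, 1.0 - high_lat)  # rule: low CPU AND low latency
    # Crude defuzzification to a replica adjustment.
    return round(2 * scale_up - 1 * scale_down)

print(scale_decision(cpu_pct=85, latency_ms=300))   # positive: scale up
```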
The experiments showed that our proposed scaling algorithm for network slicing managed with SDN guarantees the necessary radio and network resources for end-to-end high availability.}, } @article {pmid34072232, year = {2021}, author = {Shoeibi, A and Khodatars, M and Ghassemi, N and Jafari, M and Moridian, P and Alizadehsani, R and Panahiazar, M and Khozeimeh, F and Zare, A and Hosseini-Nejad, H and Khosravi, A and Atiya, AF and Aminshahidi, D and Hussain, S and Rouhani, M and Nahavandi, S and Acharya, UR}, title = {Epileptic Seizures Detection Using Deep Learning Techniques: A Review.}, journal = {International journal of environmental research and public health}, volume = {18}, number = {11}, pages = {}, pmid = {34072232}, issn = {1660-4601}, mesh = {Algorithms ; Artificial Intelligence ; *Deep Learning ; Electroencephalography ; *Epilepsy/diagnosis ; Humans ; Seizures/diagnosis ; }, abstract = {A variety of screening approaches have been proposed to diagnose epileptic seizures, using electroencephalography (EEG) and magnetic resonance imaging (MRI) modalities. Artificial intelligence encompasses a variety of areas, and one of its branches is deep learning (DL). Before the rise of DL, conventional machine learning algorithms involving feature extraction were performed. This limited their performance to the ability of those handcrafting the features. However, in DL, the extraction of features and classification are entirely automated. The advent of these techniques in many areas of medicine, such as in the diagnosis of epileptic seizures, has made significant advances. In this study, a comprehensive overview of works focused on automated epileptic seizure detection using DL techniques and neuroimaging modalities is presented. Various methods proposed to diagnose epileptic seizures automatically using EEG and MRI modalities are described. In addition, rehabilitation systems developed for epileptic seizures using DL have been analyzed, and a summary is provided. The rehabilitation tools include cloud computing techniques and hardware required for implementation of DL algorithms. The important challenges in accurate detection of automated epileptic seizures using DL with EEG and MRI modalities are discussed. The advantages and limitations in employing DL-based techniques for epileptic seizures diagnosis are presented. Finally, the most promising DL models proposed and possible future works on automated epileptic seizure detection are delineated.}, } @article {pmid34071801, year = {2021}, author = {Rashed, EA and Hirata, A}, title = {One-Year Lesson: Machine Learning Prediction of COVID-19 Positive Cases with Meteorological Data and Mobility Estimate in Japan.}, journal = {International journal of environmental research and public health}, volume = {18}, number = {11}, pages = {}, pmid = {34071801}, issn = {1660-4601}, mesh = {*COVID-19 ; Forecasting ; Humans ; Japan/epidemiology ; Machine Learning ; *Pandemics ; SARS-CoV-2 ; }, abstract = {With the wide spread of COVID-19 and the corresponding negative impact on different life aspects, it becomes important to understand ways to deal with the pandemic as a part of daily routine. After a year of the COVID-19 pandemic, it has become obvious that different factors, including meteorological factors, influence the speed at which the disease is spread and the potential fatalities. However, the impact of each factor on the speed at which COVID-19 is spreading remains controversial. 
Accurate forecasting of potential positive cases may lead to better management of healthcare resources and provide guidelines for government policies in terms of the action required within an effective timeframe. Recently, Google Cloud has provided online COVID-19 forecasting data for the United States and Japan, which would help in predicting future situations on a state/prefecture scale and are updated on a day-by-day basis. In this study, we propose a deep learning architecture to predict the spread of COVID-19 considering various factors, such as meteorological data and public mobility estimates, and applied it to data collected in Japan to demonstrate its effectiveness. The proposed model was constructed using a neural network architecture based on a long short-term memory (LSTM) network. The model consists of multi-path LSTM layers that are trained using time-series meteorological data and public mobility data obtained from open-source data. The model was tested using different time frames, and the results were compared to Google Cloud forecasts. Public mobility is a dominant factor in estimating new positive cases, whereas meteorological data improve their accuracy. The average relative error of the proposed model ranged from 16.1% to 22.6% in major regions, which is a significant improvement compared with Google Cloud forecasting. This model can be used to provide public awareness regarding the morbidity risk of the COVID-19 pandemic in a feasible manner.}, } @article {pmid34071676, year = {2021}, author = {Gorgulla, C and Çınaroğlu, SS and Fischer, PD and Fackeldey, K and Wagner, G and Arthanari, H}, title = {VirtualFlow Ants-Ultra-Large Virtual Screenings with Artificial Intelligence Driven Docking Algorithm Based on Ant Colony Optimization.}, journal = {International journal of molecular sciences}, volume = {22}, number = {11}, pages = {}, pmid = {34071676}, issn = {1422-0067}, support = {R01 AI150709/AI/NIAID NIH HHS/United States ; R01 GM129026/GM/NIGMS NIH HHS/United States ; GM129026/GM/NIGMS NIH HHS/United States ; R01 AI037581/AI/NIAID NIH HHS/United States ; CA200913/CA/NCI NIH HHS/United States ; }, mesh = {*Algorithms ; *Artificial Intelligence ; Computational Biology/*methods ; Kelch-Like ECH-Associated Protein 1/chemistry/metabolism ; Ligands ; *Molecular Docking Simulation ; NF-E2-Related Factor 2/chemistry/metabolism ; Protein Binding ; Protein Conformation ; Reproducibility of Results ; Thermodynamics ; }, abstract = {The docking program PLANTS, which is based on ant colony optimization (ACO) algorithm, has many advanced features for molecular docking. Among them are multiple scoring functions, the possibility to model explicit displaceable water molecules, and the inclusion of experimental constraints. Here, we add support of PLANTS to VirtualFlow (VirtualFlow Ants), which adds a valuable method for primary virtual screenings and rescoring procedures. Furthermore, we have added support of ligand libraries in the MOL2 format, as well as on the fly conversion of ligand libraries which are in the PDBQT format to the MOL2 format to endow VirtualFlow Ants with an increased flexibility regarding the ligand libraries. The on the fly conversion is carried out with Open Babel and the program SPORES. We applied VirtualFlow Ants to a test system involving KEAP1 on the Google Cloud up to 128,000 CPUs, and the observed scaling behavior is approximately linear. 
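The near-linear scaling reported above rests on screening being embarrassingly parallel; a minimal fan-out sketch, where dock_one is a hypothetical stand-in for a single docking invocation:

```python
from multiprocessing import Pool

def dock_one(ligand_path):
    """Hypothetical stand-in for one docking run; returns (ligand, score).
    A real worker would invoke the docking program and parse its output."""
    score = -float(sum(ord(c) for c in ligand_path) % 97) / 10.0  # placeholder
    return ligand_path, score

if __name__ == "__main__":
    ligands = [f"ligand_{i:07d}.mol2" for i in range(100_000)]
    with Pool() as pool:                      # one worker per CPU core
        results = pool.map(dock_one, ligands, chunksize=1000)
    best = sorted(results, key=lambda r: r[1])[:10]
    print(best)
```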
Furthermore, we have adjusted several central docking parameters of PLANTS (such as the speed parameter or the number of ants) and screened 10 million compounds for each of the 10 resulting docking scenarios. We analyzed their docking scores and average docking times, which are key factors in virtual screenings. The possibility of carrying out ultra-large virtual screening with PLANTS via VirtualFlow Ants opens new avenues in computational drug discovery.}, } @article {pmid34071449, year = {2021}, author = {Ismail, L and Materwala, H and Hennebelle, A}, title = {A Scoping Review of Integrated Blockchain-Cloud (BcC) Architecture for Healthcare: Applications, Challenges and Solutions.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {11}, pages = {}, pmid = {34071449}, issn = {1424-8220}, support = {31R215//National Water and Energy Center, United Arab Emirates University/ ; }, mesh = {*Blockchain ; Computer Security ; Data Management ; Delivery of Health Care ; Electronic Health Records ; Humans ; }, abstract = {Blockchain is a disruptive technology for shaping the next era of a healthcare system striving for efficient and effective patient care. This is thanks to its peer-to-peer, secure, and transparent characteristics. On the other hand, cloud computing made its way into the healthcare system thanks to its elastic and cost-efficient nature. However, cloud-based systems fail to provide a secure and private patient-centric cohesive view to multiple healthcare stakeholders. In this situation, blockchain provides solutions to address security and privacy concerns of the cloud because of its decentralization feature combined with data security and privacy, while cloud provides solutions to the blockchain scalability and efficiency challenges. Therefore, a novel paradigm of blockchain-cloud integration (BcC) emerges for the domain of healthcare. In this paper, we provide an in-depth analysis of the BcC integration for the healthcare system to give readers the motivations behind the emergence of this new paradigm and introduce a classification of existing architectures and their applications for better healthcare. We then review the development platforms and services and highlight the research challenges for the integrated BcC architecture, possible solutions, and future research directions. The results of this paper will be useful for the healthcare industry to design and develop a data management system for better patient care.}, } @article {pmid34070966, year = {2021}, author = {Krzysztoń, M and Niewiadomska-Szynkiewicz, E}, title = {Intelligent Mobile Wireless Network for Toxic Gas Cloud Monitoring and Tracking.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {11}, pages = {}, pmid = {34070966}, issn = {1424-8220}, support = {833456//Horizon 2020/ ; }, abstract = {Intelligent wireless networks that comprise self-organizing autonomous vehicles equipped with point sensors and radio modules support many hostile and harsh environment monitoring systems. This work's contribution shows the benefits of applying such networks to estimate the boundaries of clouds created by hazardous toxic substances heavier than air when accidentally released into the atmosphere. The paper addresses issues concerning sensing networks' design, focussing on a computing scheme for online motion trajectory calculation and data exchange. 
A three-stage approach that incorporates three algorithms for sensing devices' displacement calculation in a collaborative network according to the current task, namely exploration and gas cloud detection, boundary detection and estimation, and tracking the evolving cloud, is presented. A network connectivity-maintaining virtual force mobility model is used to calculate subsequent sensor positions, and multi-hop communication is used for data exchange. The main focus is on the efficient tracking of the cloud boundary. The proposed sensing scheme is sensitive to crucial mobility model parameters. The paper presents five procedures for calculating the optimal values of these parameters. In contrast to widely used techniques, the presented approach to gas cloud monitoring does not calculate sensors' displacements based on exact values of gas concentration and concentration gradients. The sensor readings are reduced to two values: gas concentration below or above the safe value. The utility and efficiency of the presented method were justified through extensive simulations, giving encouraging results. The test cases were carried out on several scenarios with regular and irregular shapes of clouds generated using a widely used box model that describes the heavy gas dispersion in the atmospheric air. The simulation results demonstrate that a gas cloud boundary can be detected and efficiently tracked using only a rough measurement indicating that the threshold concentration value was exceeded. This makes the sensing system less sensitive to the quality of the gas concentration measurement. Thus, it can be easily used to detect real phenomena. Significant results include recommendations on selecting procedures for computing mobility model parameters while tracking clouds with different shapes, and on determining optimal values of these parameters for convex and nonconvex cloud boundaries.}, } @article {pmid34070719, year = {2021}, author = {Tufail, A and Namoun, A and Sen, AAA and Kim, KH and Alrehaili, A and Ali, A}, title = {Moisture Computing-Based Internet of Vehicles (IoV) Architecture for Smart Cities.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {11}, pages = {}, pmid = {34070719}, issn = {1424-8220}, support = {Takamul Project 10//Islamic University of Madina, Saudi Arabia/ ; }, abstract = {Recently, the concept of combining 'things' on the Internet to provide various services has gained tremendous momentum. Such a concept has also impacted the automotive industry, giving rise to the Internet of Vehicles (IoV). IoV enables Internet connectivity and communication between smart vehicles and other devices on the network. Shifting the computing towards the edge of the network reduces communication delays and provides various services instantly. However, both distributed (i.e., edge computing) and central computing (i.e., cloud computing) architectures suffer from several inherent issues, such as high latency, high infrastructure cost, and performance degradation. We propose a novel concept of computation, which we call moisture computing (MC), to be deployed slightly away from the edge of the network but below the cloud infrastructure. The MC-based IoV architecture can be used to assist smart vehicles in collaborating to solve traffic monitoring, road safety, and management issues. Moreover, the MC can be used to dispatch emergency and roadside assistance in case of incidents and accidents. 
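The connectivity-maintaining virtual force mobility model described in the gas-cloud entry above can be illustrated with a toy displacement step: each neighbour exerts an attractive force when too far away and a repulsive force when too close, and the sensor moves along the resulting vector. All constants here are hypothetical, and the paper's five parameter-selection procedures are not reproduced.

    # Toy virtual-force displacement step for one sensor (illustrative only).
    import numpy as np

    def displacement(pos, neighbors, d_ref=10.0, k=0.5, step=1.0):
        """Move pos so that neighbours settle near the reference distance d_ref."""
        force = np.zeros(2)
        for n in neighbors:
            diff = n - pos
            dist = np.linalg.norm(diff)
            if dist > 0:
                force += k * (dist - d_ref) * diff / dist  # attract or repel
        norm = np.linalg.norm(force)
        return pos + step * force / norm if norm > 0 else pos

    sensor = np.array([0.0, 0.0])
    print(displacement(sensor, [np.array([12.0, 0.0]), np.array([0.0, 6.0])]))
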
In contrast to the cloud, which covers a broader area, the MC provides smart vehicles with critical information with lower delay. We argue that the MC can help reduce infrastructure costs efficiently since it requires a medium-scale data center with moderate resources to cover a wider area compared to small-scale data centers in edge computing and large-scale data centers in cloud computing. We performed mathematical analyses to demonstrate that the MC reduces network delays and enhances the response time in contrast to the edge and cloud infrastructure. Moreover, we present a simulation-based implementation to evaluate the computational performance of the MC. Our simulation results show that the total processing time (computation delay and communication delay) is optimized, and delays are minimized in the MC as opposed to the traditional approaches.}, } @article {pmid34070069, year = {2021}, author = {Sim, SH and Jeong, YS}, title = {Multi-Blockchain-Based IoT Data Processing Techniques to Ensure the Integrity of IoT Data in AIoT Edge Computing Environments.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {10}, pages = {}, pmid = {34070069}, issn = {1424-8220}, abstract = {As IoT technologies have developed rapidly in recent years, most IoT data processing has focused on monitoring and control; however, the cost of collecting and linking various IoT data keeps increasing, so collected IoT data must be proactively integrated and analyzed for cloud servers (data centers) to process them smartly. In this paper, we propose a blockchain-based IoT big data integrity verification technique to ensure the safety of the Third Party Auditor (TPA), which has a role in auditing the integrity of AIoT data. The proposed technique aims to minimize IoT information loss by grouping information and signature keys from IoT devices into multiple blockchains. The proposed technique effectively guarantees the integrity of AIoT data by linking the hash values of arbitrary, constant-size blocks with previous blocks in hierarchical chains. The proposed technique performs synchronization using location information between the central server and IoT devices to manage the integrity of IoT information at low cost. In order to easily control the many locations of IoT devices, we perform cross-distributed and blockchain linkage processing under constant rules to improve the load and throughput generated by IoT devices.}, } @article {pmid34068743, year = {2021}, author = {Melo, GCG and Torres, IC and Araújo, ÍBQ and Brito, DB and Barboza, EA}, title = {A Low-Cost IoT System for Real-Time Monitoring of Climatic Variables and Photovoltaic Generation for Smart Grid Application.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {9}, pages = {}, pmid = {34068743}, issn = {1424-8220}, support = {PD02/2016//Agência Nacional de Energia Elétrica (ANEEL) and Equatorial Energia/ ; }, abstract = {Monitoring and data acquisition are essential to recognize the renewable resources available on-site, evaluate electrical conversion efficiency, detect failures, and optimize electrical production. Commercial monitoring systems for photovoltaic systems are generally expensive and closed to modification. 
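The hierarchical hash linking sketched in the Sim and Jeong abstract above boils down to chaining blocks of IoT data through their hashes. A minimal stand-alone illustration, which does not reproduce the authors' multi-blockchain grouping or TPA logic, is:

    # Minimal hash-chained blocks (illustrative sketch only).
    import hashlib, json, time

    def make_block(payload, prev_hash):
        block = {"ts": time.time(), "payload": payload, "prev": prev_hash}
        block["hash"] = hashlib.sha256(
            json.dumps(block, sort_keys=True).encode()).hexdigest()
        return block

    chain = [make_block({"device": "sensor-1", "v": 21.7}, "0" * 64)]
    chain.append(make_block({"device": "sensor-1", "v": 21.9}, chain[-1]["hash"]))

    def verify(chain):
        # Recompute every non-genesis hash and check the back-links.
        for prev, cur in zip(chain, chain[1:]):
            body = {k: cur[k] for k in ("ts", "payload", "prev")}
            expected = hashlib.sha256(
                json.dumps(body, sort_keys=True).encode()).hexdigest()
            if cur["prev"] != prev["hash"] or cur["hash"] != expected:
                return False
        return True

    print(verify(chain))  # True until any block is tampered with
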
This work proposes a low-cost real-time internet of things system for micro and mini photovoltaic generation systems that can monitor DC voltage, DC current, AC power, and seven meteorological variables. The proposed system measures all relevant meteorological variables and directly acquires photovoltaic generation data from the plant (not from the inverter). The system is implemented using open software, connects to the internet without cables, stores data locally and in the cloud, and uses the network time protocol to synchronize the devices' clocks. To the best of our knowledge, no work reported in the literature presents all of these features together. Furthermore, experiments carried out with the proposed system showed good effectiveness and reliability. This system enables fog and cloud computing in a photovoltaic system, creating a time-series measurement data set and enabling the future use of machine learning to create smart photovoltaic systems.}, } @article {pmid34068200, year = {2021}, author = {Amoakoh, AO and Aplin, P and Awuah, KT and Delgado-Fernandez, I and Moses, C and Alonso, CP and Kankam, S and Mensah, JC}, title = {Testing the Contribution of Multi-Source Remote Sensing Features for Random Forest Classification of the Greater Amanzule Tropical Peatland.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {10}, pages = {}, pmid = {34068200}, issn = {1424-8220}, support = {GTA//Edge Hill University/ ; }, abstract = {Tropical peatlands such as Ghana's Greater Amanzule peatland are highly valuable ecosystems and are under great pressure from anthropogenic land use activities. Accurate measurement of their occurrence and extent is required to facilitate sustainable management. A key challenge, however, is the high cloud cover in the tropics that limits optical remote sensing data acquisition. In this work we combine optical imagery with radar and elevation data to optimise land cover classification for the Greater Amanzule tropical peatland. Sentinel-2, Sentinel-1 and Shuttle Radar Topography Mission (SRTM) imagery were acquired and integrated to drive a machine learning land cover classification using a random forest classifier. Recursive feature elimination was used to optimize the high-dimensional and correlated feature space and determine the optimal features for the classification. Six datasets were compared, comprising different combinations of optical, radar and elevation features. Results showed that the best overall accuracy (OA) was found for the integrated Sentinel-2, Sentinel-1 and SRTM dataset (S2+S1+DEM), significantly outperforming all the other classifications with an OA of 94%. Assessment of the sensitivity of land cover classes to image features indicated that elevation and the original Sentinel-1 bands contributed the most to separating tropical peatlands from other land cover types. The integration of more features and the removal of redundant features systematically increased classification accuracy. We estimate Ghana's Greater Amanzule peatland covers 60,187 ha. 
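The feature-optimisation step in the peatland workflow above, recursive feature elimination wrapped around a random forest, can be sketched in a few lines with scikit-learn; synthetic features stand in for the Sentinel and SRTM bands, and the parameters are illustrative rather than the study's settings.

    # Sketch: recursive feature elimination around a random forest.
    from sklearn.datasets import make_classification
    from sklearn.ensemble import RandomForestClassifier
    from sklearn.feature_selection import RFECV
    from sklearn.model_selection import cross_val_score

    # Synthetic stand-in for stacked optical/radar/elevation features.
    X, y = make_classification(n_samples=500, n_features=30, n_informative=8,
                               random_state=0)
    rf = RandomForestClassifier(n_estimators=200, random_state=0)
    selector = RFECV(rf, step=2, cv=3).fit(X, y)     # drop weak features
    print("features kept:", selector.n_features_)
    print("OA estimate:",
          cross_val_score(rf, selector.transform(X), y, cv=3).mean())
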
Our proposed methodological framework contributes a robust workflow for accurate and detailed landscape-scale monitoring of tropical peatlands, while our findings provide timely information critical for the sustainable management of the Greater Amanzule peatland.}, } @article {pmid34066019, year = {2021}, author = {Puliafito, A and Tricomi, G and Zafeiropoulos, A and Papavassiliou, S}, title = {Smart Cities of the Future as Cyber Physical Systems: Challenges and Enabling Technologies.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {10}, pages = {}, pmid = {34066019}, issn = {1424-8220}, abstract = {A smart city represents an improvement of today's cities, both functionally and structurally, that strategically utilizes several smart factors, capitalizing on Information and Communications Technology (ICT) to increase the city's sustainable growth and strengthen the city's functions, while ensuring the citizens' enhanced quality of life and health. Cities can be viewed as a microcosm of interconnected "objects" with which citizens interact daily, which represents an extremely interesting example of a cyber physical system (CPS), where the continuous monitoring of a city's status occurs through sensors and processors applied within the real-world infrastructure. Each object in a city can be both the collector and distributor of information regarding mobility, energy consumption, air pollution as well as potentially offering cultural and tourist information. As a consequence, the cyber and real worlds are strongly linked and interdependent in a smart city. New services can be deployed when needed, and evaluation mechanisms can be set up to assess the health and success of a smart city. In particular, the objectives of creating ICT-enabled smart city environments target (but are not limited to) improved city services; optimized decision-making; the creation of smart urban infrastructures; the orchestration of cyber and physical resources; addressing challenging urban issues, such as environmental pollution, transportation management, energy usage and public health; the optimization of the use and benefits of next generation (5G and beyond) communication; the capitalization of social networks and their analysis; support for tactile internet applications; and the inspiration of urban citizens to improve their quality of life. However, the large-scale deployment of cyber-physical-social systems faces a series of challenges and issues (e.g., energy efficiency requirements, architecture, protocol stack design, implementation, and security), which require smarter sensing and computing methods as well as advanced networking and communications technologies to provide more pervasive cyber-physical-social services. In this paper, we discuss the challenges, the state-of-the-art, and the solutions to a set of currently unresolved key questions related to CPSs and smart cities.}, } @article {pmid34065920, year = {2021}, author = {Albowarab, MH and Zakaria, NA and Zainal Abidin, Z}, title = {Directionally-Enhanced Binary Multi-Objective Particle Swarm Optimisation for Load Balancing in Software Defined Networks.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {10}, pages = {}, pmid = {34065920}, issn = {1424-8220}, abstract = {Various aspects of task execution load balancing of Internet of Things (IoT) networks can be optimised using intelligent algorithms provided by software-defined networking (SDN). 
These load balancing aspects include makespan, energy consumption, and execution cost. While past studies have evaluated load balancing from one or two aspects, none has explored the possibility of simultaneously optimising all aspects, namely, reliability, energy, cost, and execution time. For the purposes of load balancing, implementing multi-objective optimisation (MOO) based on meta-heuristic searching algorithms requires assurances that the solution space will be thoroughly explored. Optimising load balancing provides decision makers not only with optimised solutions but also with a rich set of candidate solutions to choose from. Therefore, the purposes of this study were (1) to propose a joint mathematical formulation to solve load balancing challenges in cloud computing and (2) to propose two multi-objective particle swarm optimisation (MP) models: distance angle multi-objective particle swarm optimization (DAMP) and angle multi-objective particle swarm optimization (AMP). Unlike existing models that only use crowding distance as a criterion for solution selection, our MP models probabilistically combine both crowding distance and crowding angle. More specifically, we only selected solutions that had more than a 0.5 probability of higher crowding distance and higher angular distribution. In addition, binary variants of the approaches were generated based on a transfer function, and they were denoted by binary DAMP (BDAMP) and binary AMP (BAMP). After using MOO mathematical functions to compare our models, BDAMP and BAMP, with the standard state-of-the-art models, BMP, BDMP and BPSO, they were tested using the proposed load balancing model. Both tests proved that our DAMP and AMP models were far superior to the standard state-of-the-art models: MP, crowding distance multi-objective particle swarm optimisation (DMP), and PSO. Therefore, this study enables the incorporation of meta-heuristics in the management layer of cloud networks.}, } @article {pmid34065434, year = {2021}, author = {Choi, Y and Kim, N and Hong, S and Bae, J and Park, I and Sohn, HG}, title = {Critical Image Identification via Incident-Type Definition Using Smartphone Data during an Emergency: A Case Study of the 2020 Heavy Rainfall Event in Korea.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {10}, pages = {}, pmid = {34065434}, issn = {1424-8220}, support = {no.20009742//Ministry of Interior and Safety (MOIS, Korea)/ ; }, mesh = {*Algorithms ; Cloud Computing ; Republic of Korea ; *Smartphone ; }, abstract = {In unpredictable disaster scenarios, it is important to recognize the situation promptly and take appropriate response actions. This study proposes a cloud computing-based data collection, processing, and analysis process that employs a crowd-sensing application. Clustering algorithms are used to define the major damage types, and hotspot analysis is applied to effectively filter critical data from crowdsourced data. To verify the utility of the proposed process, it is applied to Icheon-si and Anseong-si, both in Gyeonggi-do, which were affected by heavy rainfall in 2020. The results show that the incident types at the damaged sites were effectively detected, and images reflecting the damage situation could be classified by applying the geospatial analysis technique. For 5 August 2020, which was close to the date of the event, the images were classified with a precision of 100% at a threshold of 0.4. 
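The crowding distance that the DAMP and AMP models above combine with a crowding-angle criterion is a standard diversity measure; a minimal sketch of computing it over a two-objective front follows. The angle term and the 0.5-probability selection rule from the paper are not reproduced here, and the sample front is invented.

    # Sketch: crowding distance over a multi-objective front.
    import numpy as np

    def crowding_distance(F):
        """F: (n, m) objective matrix; returns per-solution crowding distance."""
        n, m = F.shape
        d = np.zeros(n)
        for j in range(m):
            order = np.argsort(F[:, j])
            span = F[order[-1], j] - F[order[0], j] or 1.0
            d[order[0]] = d[order[-1]] = np.inf       # keep boundary solutions
            d[order[1:-1]] += (F[order[2:], j] - F[order[:-2], j]) / span
        return d

    front = np.array([[1.0, 9.0], [2.0, 7.0], [4.0, 4.0], [7.0, 2.0], [9.0, 1.0]])
    print(crowding_distance(front))  # larger = less crowded neighbourhood
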
For 24-25 August 2020, the image classification precision exceeded 95% at a threshold of 0.5, except for the mudslide/mudflow in the Yul area. The location distribution of the classified images showed a distribution similar to that of damaged regions in unmanned aerial vehicle images.}, } @article {pmid34065011, year = {2021}, author = {Martínez-Gutiérrez, A and Díez-González, J and Ferrero-Guillén, R and Verde, P and Álvarez, R and Perez, H}, title = {Digital Twin for Automatic Transportation in Industry 4.0.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {10}, pages = {}, pmid = {34065011}, issn = {1424-8220}, support = {PID2019-108277GB-C21//Spanish Ministry of Science and Innovation/ ; }, abstract = {Industry 4.0 is the fourth industrial revolution, consisting of the digitalization of processes to facilitate an incremental value chain. Smart Manufacturing (SM) is one of the branches of Industry 4.0, covering logistics, visual inspection of pieces, optimal organization of processes, machine sensorization, real-time data acquisition and treatment, and virtualization of industrial activities. Among these techniques, the Digital Twin (DT) has attracted the research interest of the scientific community in the last few years due to the cost reduction achieved by simulating the dynamic behaviour of the industrial plant and predicting potential problems in the SM paradigm. In this paper, we propose a new DT design concept based on an external service for the transportation of Automatic Guided Vehicles (AGVs), which have recently been introduced to satisfy Material Requirement Planning in the collaborative industrial plant. We have performed real experimentation in two different scenarios through the definition of an Industrial Ethernet platform for the real validation of the DT results obtained. Results show the correlation between the virtual and real experiments carried out in the two scenarios defined in this paper, with an accuracy of 97.95% and 98.82% in the total time of the missions analysed in the DT. Therefore, these results validate the model created for the AGV navigation, thus fulfilling the objectives of this paper.}, } @article {pmid34064712, year = {2021}, author = {Sobczak, Ł and Filus, K and Domański, A and Domańska, J}, title = {LiDAR Point Cloud Generation for SLAM Algorithm Evaluation.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {10}, pages = {}, pmid = {34064712}, issn = {1424-8220}, support = {DOB-2P/02/07/2017//Narodowe Centrum Badań i Rozwoju/ ; }, abstract = {With the emerging interest in autonomous driving levels 4 and 5 comes a necessity to provide accurate and versatile frameworks to evaluate the algorithms used in autonomous vehicles. There is a clear gap in the field of autonomous driving simulators: it covers testing and parameter tuning of SLAM, a key component of autonomous driving systems; frameworks targeting off-road and safety-critical environments; and consideration of the non-idealistic nature of real-life sensors, associated phenomena and measurement errors. We created a LiDAR simulator that delivers accurate 3D point clouds in real time. The point clouds are generated based on the sensor placement and the LiDAR type, which can be set using configurable parameters. We evaluate our solution based on a comparison of the results obtained using an actual device, a Velodyne VLP-16, on real-life tracks and the corresponding simulations. 
We measure the error values obtained using the Google Cartographer SLAM algorithm and the distance between the simulated and real point clouds to verify their accuracy. The results show that our simulation (which incorporates measurement errors and the rolling shutter effect) produces data that can successfully imitate the real-life point clouds. Due to dedicated mechanisms, it is compatible with the Robot Operating System (ROS) and can be used interchangeably with data from actual sensors, which enables easy testing, SLAM algorithm parameter tuning and deployment.}, } @article {pmid34064710, year = {2021}, author = {Khamisy-Farah, R and Furstenau, LB and Kong, JD and Wu, J and Bragazzi, NL}, title = {Gynecology Meets Big Data in the Disruptive Innovation Medical Era: State-of-Art and Future Prospects.}, journal = {International journal of environmental research and public health}, volume = {18}, number = {10}, pages = {}, pmid = {34064710}, issn = {1660-4601}, mesh = {Artificial Intelligence ; *Big Data ; Data Science ; Delivery of Health Care ; Female ; *Gynecology ; Humans ; }, abstract = {Tremendous scientific and technological achievements have been revolutionizing the current medical era, changing the way in which physicians practice their profession and deliver healthcare provisions. This is due to the convergence of various advancements related to digitalization and the use of information and communication technologies (ICTs)-ranging from the internet of things (IoT) and the internet of medical things (IoMT) to the fields of robotics, virtual and augmented reality, and massively parallel and cloud computing. Further progress has been made in the fields of additive manufacturing and three-dimensional (3D) printing, sophisticated statistical tools such as big data visualization and analytics (BDVA) and artificial intelligence (AI), the use of mobile and smartphone applications (apps), remote monitoring and wearable sensors, and e-learning, among others. Within this new conceptual framework, big data represents a massive set of data characterized by different properties and features. These can be categorized both from a quantitative and qualitative standpoint, and include data generated from wet-lab and microarrays (molecular big data), databases and registries (clinical/computational big data), imaging techniques (such as radiomics, imaging big data) and web searches (the so-called infodemiology, digital big data). The present review aims to show how big and smart data can revolutionize gynecology by shedding light on female reproductive health, both in terms of physiology and pathophysiology. 
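The measurement-error modelling mentioned in the LiDAR-simulator abstract above can be illustrated by perturbing ideal ranges with Gaussian noise before converting beams to points. The scene, noise level and scan pattern below are assumptions for illustration, not the simulator's actual model.

    # Perturb ideal LiDAR ranges with Gaussian noise (parameters assumed).
    import numpy as np

    rng = np.random.default_rng(0)
    angles = np.linspace(0, 2 * np.pi, 360, endpoint=False)
    true_range = 10.0 + 2.0 * np.sin(3 * angles)             # synthetic scene
    noisy = true_range + rng.normal(0.0, 0.03, angles.size)  # ~3 cm std dev
    # (A rolling-shutter effect would additionally offset each beam in time.)
    points = np.column_stack((noisy * np.cos(angles), noisy * np.sin(angles)))
    print(points[:3])
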
More specifically, they appear to have potential uses in the field of gynecology to increase its accuracy and precision, stratify patients, provide opportunities for personalized treatment options rather than delivering a package of "one-size-fits-all" healthcare management provisions, and enhance its effectiveness at each stage (health promotion, prevention, diagnosis, prognosis, and therapeutics).}, } @article {pmid34063234, year = {2021}, author = {Jalowiczor, J and Rozhon, J and Voznak, M}, title = {Study of the Efficiency of Fog Computing in an Optimized LoRaWAN Cloud Architecture.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {9}, pages = {}, pmid = {34063234}, issn = {1424-8220}, support = {SP2021/25//Ministerstvo Školství, Mládeže a Tělovýchovy/ ; LM2018140//e-Infrastructure CZ/ ; }, abstract = {The technologies of the Internet of Things (IoT) have an increasing influence on our daily lives. The expansion of the IoT is associated with the growing number of IoT devices that are connected to the Internet. As the number of connected devices grows, the demand for speed and data volume also grows. While most IoT network technologies use cloud computing, this solution becomes inefficient for some use-cases. For example, suppose that a company uses an IoT network with several sensors to collect data within a production hall. The company may require sharing only selected data with the public cloud and responding faster to specific events. In the case of a large amount of data, off-loading techniques can be utilized to reach higher efficiency. Meeting these requirements is difficult or impossible for solutions adopting cloud computing. The fog computing paradigm addresses these cases by providing data processing closer to end devices. This paper proposes three possible network architectures that adopt fog computing for LoRaWAN because LoRaWAN is already deployed in many locations and offers long-distance communication with low power consumption. The architecture proposals are further compared in simulations to select the optimal form in terms of total service time. The resulting optimal communication architecture could be deployed to the existing LoRaWAN with minimal cost and effort of the network operator.}, } @article {pmid34057379, year = {2021}, author = {Spjuth, O and Frid, J and Hellander, A}, title = {The machine learning life cycle and the cloud: implications for drug discovery.}, journal = {Expert opinion on drug discovery}, volume = {16}, number = {9}, pages = {1071-1079}, doi = {10.1080/17460441.2021.1932812}, pmid = {34057379}, issn = {1746-045X}, mesh = {Animals ; *Artificial Intelligence ; Cloud Computing ; Drug Discovery ; Humans ; Life Cycle Stages ; *Machine Learning ; }, abstract = {Introduction: Artificial intelligence (AI) and machine learning (ML) are increasingly used in many aspects of drug discovery. Larger data sizes and methods such as Deep Neural Networks contribute to challenges in data management, the required software stack, and computational infrastructure. There is an increasing need in drug discovery to continuously re-train models and make them available in production environments. Areas covered: This article describes how cloud computing can aid the ML life cycle in drug discovery. The authors discuss opportunities with containerization and scientific workflows, introduce the concept of MLOps, and describe how it can facilitate reproducible and robust ML modeling in drug discovery organizations. 
They also discuss ML on private, sensitive and regulated data. Expert opinion: Cloud computing offers a compelling suite of building blocks to sustain the ML life cycle integrated in iterative drug discovery. Containerization and platforms such as Kubernetes together with scientific workflows can enable reproducible and resilient analysis pipelines, and the elasticity and flexibility of cloud infrastructures enable scalable and efficient access to compute resources. Drug discovery commonly involves working with sensitive or private data, and cloud computing and federated learning can contribute toward enabling collaborative drug discovery within and between organizations. Abbreviations: AI = Artificial Intelligence; DL = Deep Learning; GPU = Graphics Processing Unit; IaaS = Infrastructure as a Service; K8S = Kubernetes; ML = Machine Learning; MLOps = Machine Learning and Operations; PaaS = Platform as a Service; QC = Quality Control; SaaS = Software as a Service.}, } @article {pmid34050420, year = {2021}, author = {Marchand, JR and Pirard, B and Ertl, P and Sirockin, F}, title = {CAVIAR: a method for automatic cavity detection, description and decomposition into subcavities.}, journal = {Journal of computer-aided molecular design}, volume = {35}, number = {6}, pages = {737-750}, pmid = {34050420}, issn = {1573-4951}, mesh = {Binding Sites ; Ligands ; Machine Learning ; Protein Binding ; Protein Conformation ; Proteins/*chemistry ; Software ; }, abstract = {The accurate description of protein binding sites is essential to the determination of similarity and the application of machine learning methods to relate the binding sites to observed functions. This work describes CAVIAR, a new open source tool for generating descriptors for binding sites, using protein structures in PDB and mmCIF format as well as trajectory frames from molecular dynamics simulations as input. The applicability of CAVIAR descriptors is showcased by computing machine learning predictions of binding site ligandability. The method can also automatically assign subcavities, even in the absence of a bound ligand. The defined subpockets mimic the empirical definitions used in medicinal chemistry projects. It is shown that the experimental binding affinity scales relatively well with the number of subcavities filled by the ligand, with compounds binding to more than three subcavities having nanomolar or better affinities to the target. The CAVIAR descriptors and methods can be used in any machine learning-based investigations of problems involving binding sites, from protein engineering to hit identification. The full software code is available on GitHub and a conda package is hosted on Anaconda cloud.}, } @article {pmid34024075, year = {2021}, author = {Chandawarkar, R and Nadkarni, P}, title = {Safe clinical photography: best practice guidelines for risk management and mitigation.}, journal = {Archives of plastic surgery}, volume = {48}, number = {3}, pages = {295-304}, pmid = {34024075}, issn = {2234-6163}, abstract = {Clinical photography is an essential component of patient care in plastic surgery. The use of unsecured smartphone cameras, digital cameras, social media, instant messaging, and commercially available cloud-based storage devices threatens patients' data safety. This paper identifies potential risks of clinical photography and heightens awareness of safe clinical photography. 
Specifically, we evaluated existing risk-mitigation strategies globally, comparing them to industry standards in similar settings, and formulated a framework for developing a risk-mitigation plan for avoiding data breaches by identifying the safest methods of picture taking, transfer to storage, retrieval, and use, both within and outside the organization. Since threats evolve constantly, the framework must evolve too. Based on a literature search of both PubMed and the web (via Google) with key phrases and child terms (for PubMed), the risks and consequences of data breaches in individual processes in clinical photography are identified. Current clinical-photography practices are described. Lastly, we evaluate current risk mitigation strategies for clinical photography by examining guidelines from professional organizations, governmental agencies, and non-healthcare industries. Combining lessons learned from the steps above into a comprehensive framework that could contribute to national/international guidelines on safe clinical photography, we provide recommendations for best practice guidelines. It is imperative that best practice guidelines for the simple, safe, and secure capture, transfer, storage, and retrieval of clinical photographs be co-developed through cooperative efforts between providers, hospital administrators, clinical informaticians, IT governance structures, and national professional organizations. This would significantly safeguard patient data security and provide the privacy that patients deserve and expect.}, } @article {pmid34022611, year = {2021}, author = {Bowler, AL and Watson, NJ}, title = {Transfer learning for process monitoring using reflection-mode ultrasonic sensing.}, journal = {Ultrasonics}, volume = {115}, number = {}, pages = {106468}, doi = {10.1016/j.ultras.2021.106468}, pmid = {34022611}, issn = {1874-9968}, abstract = {The fourth industrial revolution is set to integrate entire manufacturing processes using industrial digital technologies such as the Internet of Things, Cloud Computing, and machine learning to improve process productivity, efficiency, and sustainability. Sensors collect the real-time data required to optimise manufacturing processes and are therefore a key technology in this transformation. Ultrasonic sensors have the benefits of being low-cost, in-line, non-invasive, and able to operate in opaque systems. Supervised machine learning models can correlate ultrasonic sensor data to useful information about the manufacturing materials and processes. However, this requires a reference measurement of the process material to label each data point for model training. Labelled data is often difficult to obtain in factory environments, and so a method of training models without this is desirable. This work compares two domain adaptation methods to transfer models across processes, so that no labelled data is required to accurately monitor a target process. The two methods compared are a Single Feature transfer learning approach and Transfer Component Analysis using three features. Ultrasonic waveforms are unique to the sensor used, attachment procedure, and contact pressure. Therefore, only a small number of transferable features are investigated. Two industrially relevant processes were used as case studies: mixing and cleaning of fouling in pipes. A reflection-mode ultrasonic sensing technique was used, which monitors the sound wave reflected from the interface between the vessel wall and process material. 
Overall, the Single Feature method produced the highest prediction accuracies: up to 96.0% and 98.4% to classify the completion of mixing and cleaning, respectively; and R[2] values of up to 0.947 and 0.999 to predict the time remaining until completion. These results highlight the potential of combining ultrasonic measurements with transfer learning techniques to monitor industrial processes. However, further work is required to study various effects, such as changing sensor location between source and target domains.}, } @article {pmid34019075, year = {2021}, author = {Miller, M and Zaccheddu, N}, title = {Light for a Potentially Cloudy Situation: Approach to Validating Cloud Computing Tools.}, journal = {Biomedical instrumentation & technology}, volume = {55}, number = {2}, pages = {63-68}, pmid = {34019075}, issn = {0899-8205}, mesh = {*Cloud Computing ; }, } @article {pmid34016012, year = {2021}, author = {Sahu, ML and Atulkar, M and Ahirwal, MK and Ahamad, A}, title = {IoT-enabled cloud-based real-time remote ECG monitoring system.}, journal = {Journal of medical engineering & technology}, volume = {45}, number = {6}, pages = {473-485}, doi = {10.1080/03091902.2021.1921870}, pmid = {34016012}, issn = {1464-522X}, mesh = {Algorithms ; Cloud Computing ; Electrocardiography ; Humans ; *Internet of Things ; }, abstract = {Statistical reports all around the world have deemed cardiovascular diseases (CVDs) as the largest contributor to the death count. The electrocardiogram (ECG) is a widely accepted technology for investigating a person's CVDs. The proposed solution deals with an efficient internet of things (IoT) enabled real-time ECG monitoring system using cloud computing technologies. The article presents a cloud-centric solution to provide remote monitoring of CVD. Sensed ECG data are transmitted to an S3 bucket provided by Amazon Web Services (AWS) through a mobile gateway. The AWS cloud uses HTTP and MQTT servers to provide data visualisation, quick responses and long-lived connections to device and user. Bluetooth low energy (BLE 4.0) is used as a communication protocol for low-power data transmission between device and mobile gateway. The proposed system is implemented with filtering algorithms to ignore distractions, environmental noise and motion artefacts. It offers an analysis of ECG signals to detect various parameters such as heartbeat, PQRST wave and QRS complex intervals along with respiration rate. The proposed system prototype has been tested and validated for reliable ECG monitoring remotely in real-time.}, } @article {pmid34013035, year = {2021}, author = {Usman Sana, M and Li, Z}, title = {Efficiency aware scheduling techniques in cloud computing: a descriptive literature review.}, journal = {PeerJ. Computer science}, volume = {7}, number = {}, pages = {e509}, pmid = {34013035}, issn = {2376-5992}, abstract = {In the last decade, cloud computing has become the most in-demand platform for resolving issues and managing requests across the Internet. Cloud computing brings terrific opportunities to run cost-effective scientific workflows without requiring customers to possess any set-up. It makes available virtually unlimited resources that can be obtained, organized, and used as required. Resource scheduling plays a fundamental role in the well-organized allocation of resources to every task in the cloud environment. However, along with these gains, many challenges must be considered to propose an efficient scheduling algorithm. 
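The Single Feature idea from the ultrasonic-monitoring entry above, training on one transferable feature in a source process and applying the model unchanged to a target process, can be sketched with synthetic data; the feature values and class structure below are invented for illustration.

    # Sketch: single-feature model transfer between two processes.
    import numpy as np
    from sklearn.linear_model import LogisticRegression

    rng = np.random.default_rng(1)
    # One ultrasonic feature (e.g. reflected energy) per time step; 1 = complete.
    src_x = np.concatenate([rng.normal(0.2, 0.05, 200), rng.normal(0.8, 0.05, 200)])
    src_y = np.array([0] * 200 + [1] * 200)
    tgt_x = np.concatenate([rng.normal(0.25, 0.05, 100), rng.normal(0.75, 0.05, 100)])
    tgt_y = np.array([0] * 100 + [1] * 100)

    clf = LogisticRegression().fit(src_x.reshape(-1, 1), src_y)  # source only
    print("target accuracy:", clf.score(tgt_x.reshape(-1, 1), tgt_y))
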
An efficient scheduling algorithm must enhance the implementation of goals such as scheduling cost, load balancing, makespan, security awareness, energy consumption, reliability, service-level agreement maintenance, etc. To achieve the aforementioned goals, many state-of-the-art scheduling techniques have been proposed based upon hybrid, heuristic, and meta-heuristic approaches. This work reviews existing algorithms from the perspective of scheduling objectives and strategies. We conduct a comparative analysis of existing strategies along with the outcomes they provide. We highlight the drawbacks for insight into further research and open challenges. The findings aid researchers by providing a roadmap to propose efficient scheduling algorithms.}, } @article {pmid34013027, year = {2021}, author = {Baniata, H and Mahmood, S and Kertesz, A}, title = {Assessing anthropogenic heat flux of public cloud data centers: current and future trends.}, journal = {PeerJ. Computer science}, volume = {7}, number = {}, pages = {e478}, pmid = {34013027}, issn = {2376-5992}, abstract = {Global average temperature has been significantly increasing during the past century, mainly due to the growing rates of greenhouse gas (GHG) emissions, leading to a global warming problem. Many research works have indicated other causes of this problem, such as the anthropogenic heat flux (AHF). Cloud computing (CC) data centers (DCs), for example, perform massive computational tasks for end users, leading them to emit huge amounts of waste heat into the surrounding (local) atmosphere in the form of AHF. Out of the total power consumption of a public cloud DC, nearly 10% is wasted in the form of heat. In this paper, we quantitatively and qualitatively analyze the current state of AHF emissions of the top three cloud service providers (i.e., Google, Azure and Amazon) according to their average energy consumption and the global distribution of their DCs. In this study, we found that Microsoft Azure DCs emit the highest amounts of AHF, followed by Amazon and Google, respectively. We also found that Europe is the most negatively affected by AHF of public DCs, due to its small area relative to other continents and the large number of cloud DCs within. Accordingly, we present mean estimations of continental AHF density per square meter. Following our results, we found that the top three clouds (with waste heat at a rate of 1,720.512 MW) contribute an average of more than 2.8% of averaged continental AHF emissions. Using this percentage, we provide future trend estimations of AHF densities for the period 2020-2100. In one of the presented scenarios, our estimations predict that by 2100, AHF of public cloud DCs will reach 0.01 Wm[-2].}, } @article {pmid33997644, year = {2021}, author = {Onweni, CL and Venegas-Borsellino, CP and Treece, J and Turnbull, MT and Ritchie, C and Freeman, WD}, title = {The Power of Mobile Health: The Girl With the Gadgets in Uganda.}, journal = {Mayo Clinic proceedings. Innovations, quality & outcomes}, volume = {5}, number = {2}, pages = {486-494}, pmid = {33997644}, issn = {2542-4548}, abstract = {Medical-grade ultrasound devices are now pocket-sized and can be easily transported to underserved parts of the world, allowing health care providers to have the tools to optimize diagnoses, inform management plans, and improve patient outcomes in remote locations. 
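The AHF-density figures in the Baniata et al. abstract above are plain power-per-area arithmetic. As a back-of-envelope check, dividing the quoted waste-heat rate by a continental-scale area gives a present-day density; the area value below is an assumption chosen for scale, not a number taken from the paper.

    # Back-of-envelope AHF density from the quoted waste-heat rate.
    waste_heat_w = 1_720.512e6    # 1,720.512 MW, from the abstract
    europe_area_m2 = 10.18e12     # ~Europe's land area in m^2 (assumed)
    print(f"{waste_heat_w / europe_area_m2:.2e} W/m^2")  # ~1.7e-4 W/m^2
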
Other great advances in technology have recently occurred, such as artificial intelligence applied to mobile health devices and cloud computing, while augmented reality instructions make these devices more user-friendly and readily applicable across health care encounters. However, broader awareness of the impact of these mobile health technologies is needed among health care providers, along with training on how to use them in valid and reproducible environments, for accurate diagnosis and treatment. This article provides a summary of a Mayo International Health Program journey to Bwindi, Uganda, with a portable mobile health unit. This article shows how point-of-care ultrasonography and other technologies can benefit remote clinical diagnosis and management in underserved areas around the world.}, } @article {pmid33989047, year = {2021}, author = {Qureshi, KN and Alhudhaif, A and Anwar, RW and Bhati, SN and Jeon, G}, title = {Fully Integrated Data Communication Framework by Using Visualization Augmented Reality for Internet of Things Networks.}, journal = {Big data}, volume = {9}, number = {4}, pages = {253-264}, doi = {10.1089/big.2020.0282}, pmid = {33989047}, issn = {2167-647X}, mesh = {*Augmented Reality ; Computer Simulation ; *Internet of Things ; }, abstract = {The new and integrated area called the Internet of Things (IoT) has gained popularity due to its smart objects, services, and affordability. These networks are based on data communication, augmented reality (AR), and wired and wireless infrastructures. The basic objectives of these networks are data communication, environment monitoring, tracking, and sensing using smart devices and sensor nodes. AR is one of the attractive and advanced areas integrated into IoT networks in smart homes and smart industries to render objects in 3D, visualize information, and provide interactive reality-based control. Despite this attraction, the idea has suffered from complex and heavy processes, computational complexity, network communication degradation, and network delay. This article presents a detailed overview of these technologies and proposes a more convenient and fast data communication model by using edge computing and Fifth-Generation platforms. The article also introduces a Visualization Augmented Reality framework for IoT (VAR-IoT) networks, fully integrating communication, sensing, and actuation features with a better interface to control the objects. The proposed network model is evaluated in simulation in terms of application response time and network delay, and the results show the better performance of the proposed framework.}, } @article {pmid33979321, year = {2021}, author = {Bahmani, A and Ferriter, K and Krishnan, V and Alavi, A and Alavi, A and Tsao, PS and Snyder, MP and Pan, C}, title = {Swarm: A federated cloud framework for large-scale variant analysis.}, journal = {PLoS computational biology}, volume = {17}, number = {5}, pages = {e1008977}, pmid = {33979321}, issn = {1553-7358}, support = {RM1 HG007735/HG/NHGRI NIH HHS/United States ; U24 HG009397/HG/NHGRI NIH HHS/United States ; }, mesh = {*Cloud Computing ; Computational Biology/*methods ; Computer Security ; Datasets as Topic ; *Genomics ; Privacy ; Software ; }, abstract = {Genomic data analysis across multiple cloud platforms is an ongoing challenge, especially when large amounts of data are involved. 
Here, we present Swarm, a framework for federated computation that promotes minimal data motion and facilitates crosstalk between genomic datasets stored on various cloud platforms. We demonstrate its utility via common inquiries of genomic variants across BigQuery in the Google Cloud Platform (GCP), Athena in Amazon Web Services (AWS), Apache Presto, and MySQL. Compared to single-cloud platforms, the Swarm framework significantly reduced computational costs, run-time delays and risks of security breach and privacy violation.}, } @article {pmid33968292, year = {2021}, author = {Masud, M and Gaba, GS and Choudhary, K and Alroobaea, R and Hossain, MS}, title = {A robust and lightweight secure access scheme for cloud based E-healthcare services.}, journal = {Peer-to-peer networking and applications}, volume = {14}, number = {5}, pages = {3043-3057}, pmid = {33968292}, issn = {1936-6442}, abstract = {Traditional healthcare services have transitioned into modern healthcare services where doctors remotely diagnose patients. Cloud computing plays a significant role in this change by providing easy access to patients' medical records to all stakeholders, such as doctors, nurses, patients, life insurance agents, etc. Cloud services are scalable, cost-effective, and offer a broad range of mobile access to patients' electronic health record (EHR). Despite the cloud's enormous benefits like real-time data access, patients' EHR security and privacy are major concerns. Since the information about patients' health is highly sensitive and crucial, sharing it over an unsecured wireless medium brings many security challenges such as eavesdropping, modifications, etc. Considering the security needs of remote healthcare, this paper proposes a robust, lightweight, and secure access scheme for cloud-based E-healthcare services. The proposed scheme addresses the potential threats to E-healthcare by providing a secure interface to stakeholders and prohibiting unauthorized users from accessing information stored in the cloud. The scheme makes use of multiple keys formed through the key derivation function (KDF) to ensure end-to-end ciphering of information for preventing misuse. The rights to access the cloud services are provided based on the identity and the association between stakeholders, thus ensuring privacy. Due to its simplicity and robustness, the proposed scheme is the best fit for protecting data security and privacy in cloud-based E-healthcare services.}, } @article {pmid33968291, year = {2021}, author = {Tamizhselvi, SP and Muthuswamy, V}, title = {Delay - aware bandwidth estimation and intelligent video transcoder in mobile cloud.}, journal = {Peer-to-peer networking and applications}, volume = {14}, number = {4}, pages = {2038-2060}, pmid = {33968291}, issn = {1936-6442}, abstract = {In recent years, smartphone users have become interested in viewing live videos and sharing video resources in large volumes over social media (e.g., YouTube, Netflix). Continuous video streaming on mobile devices faces many challenges in network parameters, namely bandwidth estimation, congestion window, throughput, and delay, while transcoding is a challenging and time-consuming task. Performing these resource-intensive tasks on a mobile device is complicated; hence, the cloud is integrated with smartphones to provide Mobile Cloud Computing (MCC). To resolve the issue, we propose a novel framework called delay-aware bandwidth estimation and intelligent video transcoder in mobile cloud. 
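The multiple-key construction in the Masud et al. scheme above rests on a key derivation function. A minimal HKDF-style sketch using only the Python standard library follows; the role labels and master secret are hypothetical, and this is not the authors' exact KDF.

    # Sketch: deriving role-specific keys from one master secret (HKDF-style).
    import hashlib, hmac

    def hkdf_sha256(master, info, length=32, salt=b"\x00" * 32):
        prk = hmac.new(salt, master, hashlib.sha256).digest()        # extract
        okm, block, counter = b"", b"", 1
        while len(okm) < length:                                     # expand
            block = hmac.new(prk, block + info + bytes([counter]),
                             hashlib.sha256).digest()
            okm += block
            counter += 1
        return okm[:length]

    master = b"shared-session-secret"                 # hypothetical secret
    doctor_key = hkdf_sha256(master, b"role:doctor")
    patient_key = hkdf_sha256(master, b"role:patient")
    print(doctor_key.hex() != patient_key.hex())      # distinct per-role keys
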
In this paper, we introduce four techniques, namely, Markov Mobile Bandwidth Cloud Estimation (MMBCE), Cloud Dynamic Congestion Window (CDCW), Queue-based Video Processing for Cloud Server (QVPS), and Intelligent Video Transcoding for selecting Server (IVTS). To evaluate the performance of the proposed algorithm, we implemented a testbed using two mobile configurations and the public cloud service Amazon Web Services (AWS). The study and results in a real environment demonstrate that our proposed framework can improve QoS and outperforms the existing algorithms. Firstly, MMBCE utilizes the well-known Markov Decision Process (MDP) model to estimate the best bandwidth for the mobile device using a reward function. MMBCE improves the packet delivery ratio (PDR) by 50% compared with other algorithms. CDCW fits the congestion window and reduces packet loss dynamically. CDCW produces 40% more goodput with a minimal packet loss ratio (PLR). Next, QVPS applies the M/M/S queueing model to reduce the video processing delay and calculate the total service time. Finally, IVTS applies the M/G/N model and reduces transcoding workload utilization by 6% by intelligently selecting the transcoding server with the minimum workload. The IVTS takes less time in both slow and fast modes. The performance analysis and experimental evaluation show that the queueing model reduces the delay by 0.2 ms and the server's utilization by 20%. Hence, in this work, the cloud minimizes delay effectively to deliver good-quality video streaming on mobile devices.}, } @article {pmid33967273, year = {2021}, author = {Ito, Y and Unagami, M and Yamabe, F and Mitsui, Y and Nakajima, K and Nagao, K and Kobayashi, H}, title = {A method for utilizing automated machine learning for histopathological classification of testis based on Johnsen scores.}, journal = {Scientific reports}, volume = {11}, number = {1}, pages = {9962}, pmid = {33967273}, issn = {2045-2322}, mesh = {Adult ; Automation, Laboratory ; Azoospermia/*diagnosis/pathology ; Coloring Agents ; Eosine Yellowish-(YS) ; Hematoxylin ; Histocytochemistry/methods/*standards ; Humans ; Infertility, Male/*diagnosis/pathology ; *Machine Learning ; Male ; Seminiferous Tubules/*pathology/ultrastructure ; Spermatids/pathology/ultrastructure ; Spermatocytes/*pathology/ultrastructure ; Spermatogonia/pathology/ultrastructure ; }, abstract = {We examined whether a tool for determining Johnsen scores automatically using artificial intelligence (AI) could be used in place of traditional Johnsen scoring to support pathologists' evaluations. Average precision, precision, and recall were assessed by the Google Cloud AutoML Vision platform. We obtained testicular tissues for 275 patients and were able to use haematoxylin and eosin (H&E)-stained glass microscope slides from 264 patients. In addition, we cut out parts of the histopathology images (5.0 × 5.0 cm) to expand Johnsen's characteristic areas with seminiferous tubules. We defined four labels: Johnsen score 1-3, 4-5, 6-7, and 8-10 to distinguish Johnsen scores in clinical practice. All images were uploaded to the Google Cloud AutoML Vision platform. We obtained a dataset of 7155 images at magnification 400× and a dataset of 9822 expansion images for the 5.0 × 5.0 cm cutouts. For the 400× magnification image dataset, the average precision (positive predictive value) of the algorithm was 82.6%, precision was 80.31%, and recall was 60.96%. 
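The M/M/S queueing model that QVPS above uses to reason about video-processing delay has a closed-form mean waiting time via the Erlang C formula; a small sketch with assumed arrival and service rates (not the paper's measured values) is:

    # Sketch: mean queueing delay in an M/M/s system (Erlang C).
    from math import factorial

    def mm_s_wait(lam, mu, s):
        """Expected time a job waits in queue; lam = arrivals/s, mu = service rate."""
        rho = lam / (s * mu)
        assert rho < 1, "queue must be stable"
        a = lam / mu
        p0 = 1 / (sum(a**k / factorial(k) for k in range(s))
                  + a**s / (factorial(s) * (1 - rho)))
        erlang_c = (a**s / (factorial(s) * (1 - rho))) * p0
        return erlang_c / (s * mu - lam)

    print(mm_s_wait(lam=8.0, mu=3.0, s=4))  # assumed rates and server count
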
For the expansion image dataset (5.0 × 5.0 cm), the average precision was 99.5%, precision was 96.29%, and recall was 96.23%. This is the first report of an AI-based algorithm for predicting Johnsen scores.}, } @article {pmid33965571, year = {2021}, author = {Schuhmacher, A and Gatto, A and Kuss, M and Gassmann, O and Hinder, M}, title = {Big Techs and startups in pharmaceutical R&D - A 2020 perspective on artificial intelligence.}, journal = {Drug discovery today}, volume = {26}, number = {10}, pages = {2226-2231}, doi = {10.1016/j.drudis.2021.04.028}, pmid = {33965571}, issn = {1878-5832}, mesh = {Artificial Intelligence/*trends ; Drug Development/*trends ; Drug Discovery/trends ; Drug Industry/*trends ; Entrepreneurship ; Humans ; Machine Learning/trends ; Research/trends ; Technology/trends ; }, abstract = {We investigated what kind of artificial intelligence (AI) technologies are utilized in pharmaceutical research and development (R&D) and which sources of AI-related competencies can be leveraged by pharmaceutical companies. First, we found that machine learning (ML) is the dominating AI technology currently used in pharmaceutical R&D. Second, both Big Techs and AI startups are competent knowledge bases for AI applications. Big Techs have long-lasting experience in the digital field and offer more general IT solutions to support pharmaceutical companies in cloud computing, health monitoring, diagnostics or clinical trial management, whereas startups can provide more specific AI services to address special issues in the drug-discovery space.}, } @article {pmid33959420, year = {2021}, author = {Mora-Márquez, F and Vázquez-Poletti, JL and López de Heredia, U}, title = {NGScloud2: optimized bioinformatic analysis using Amazon Web Services.}, journal = {PeerJ}, volume = {9}, number = {}, pages = {e11237}, pmid = {33959420}, issn = {2167-8359}, abstract = {BACKGROUND: NGScloud was a bioinformatic system developed to perform de novo RNAseq analysis of non-model species by exploiting the cloud computing capabilities of Amazon Web Services. The rapid changes in the way this cloud computing service operates, along with the continuous release of novel bioinformatic applications to analyze next generation sequencing data, have made the software obsolete. NGScloud2 is an enhanced and expanded version of NGScloud that permits access to ad hoc cloud computing infrastructure, scaled according to the complexity of each experiment.

METHODS: NGScloud2 presents major technical improvements, such as the possibility of running spot instances and the most up-to-date AWS instance types, which can lead to significant cost savings. Compared to its initial implementation, this improved version updates and includes common applications for de novo RNAseq analysis, and incorporates tools to operate bioinformatic analysis workflows for reference-based RNAseq, RADseq and functional annotation. NGScloud2 optimizes access to Amazon's large computing infrastructures to easily run popular bioinformatic software applications, otherwise inaccessible to non-specialized users lacking suitable hardware infrastructures.

RESULTS: The correct performance of the pipelines for de novo RNAseq, reference-based RNAseq, RADseq and functional annotation was tested with real experimental data, providing workflow performance estimates and tips to make optimal use of NGScloud2. Further, we provide a qualitative comparison of NGScloud2 vs. the Galaxy framework. NGScloud2 code and instructions for software installation and use are available at https://github.com/GGFHF/NGScloud2. NGScloud2 includes a companion package, NGShelper, which contains Python utilities to post-process the output of the pipelines for downstream analysis at https://github.com/GGFHF/NGShelper.}, } @article {pmid33954252, year = {2021}, author = {Raza, S and Ayzed Mirza, M and Ahmad, S and Asif, M and Rasheed, MB and Ghadi, Y}, title = {A vehicle to vehicle relay-based task offloading scheme in Vehicular Communication Networks.}, journal = {PeerJ. Computer science}, volume = {7}, number = {}, pages = {e486}, pmid = {33954252}, issn = {2376-5992}, abstract = {Vehicular edge computing (VEC) is a potential field that distributes computational tasks between VEC servers and local vehicular terminals, hence improving vehicular services. At present, vehicles' intelligence and capabilities are rapidly improving, which will likely support many new and exciting applications. Network resources are well utilized by exploiting neighboring vehicles' available resources while mitigating the VEC server's heavy burden. However, due to vehicle mobility, the network topology and the available computing resources change rapidly and are difficult to predict. To tackle this problem, we investigate task offloading schemes that utilize vehicle-to-vehicle and vehicle-to-infrastructure communication modes, exploit vehicles' under-utilized computation and communication resources, and take cost and time consumption into account. We present a promising relay task-offloading scheme in vehicular edge computing (RVEC). According to this scheme, the tasks are offloaded via vehicle-to-vehicle relay for computation while being transmitted to VEC servers. Numerical results illustrate that the RVEC scheme substantially reduces the network's overall offloading cost.}, } @article {pmid33948273, year = {2020}, author = {Vignolo, SM and Diray-Arce, J and McEnaney, K and Rao, S and Shannon, CP and Idoko, OT and Cole, F and Darboe, A and Cessay, F and Ben-Othman, R and Tebbutt, SJ and Kampmann, B and Levy, O and Ozonoff, A}, title = {A cloud-based bioinformatic analytic infrastructure and Data Management Core for the Expanded Program on Immunization Consortium.}, journal = {Journal of clinical and translational science}, volume = {5}, number = {1}, pages = {e52}, pmid = {33948273}, issn = {2059-8661}, support = {MC_UU_00026/2/MRC_/Medical Research Council/United Kingdom ; U19 AI118608/AI/NIAID NIH HHS/United States ; }, abstract = {The Expanded Program for Immunization Consortium - Human Immunology Project Consortium study aims to employ systems biology to identify and characterize vaccine-induced biomarkers that predict immunogenicity in newborns. Key to this effort is the establishment of the Data Management Core (DMC) to provide reliable data and bioinformatic infrastructure for centralized curation, storage, and analysis of multiple de-identified "omic" datasets. The DMC established a cloud-based architecture using Amazon Web Services to track, store, and share data according to National Institutes of Health standards.
The DMC tracks biological samples during collection, shipping, and processing while capturing sample metadata and associated clinical data. Multi-omic datasets are stored in access-controlled Amazon Simple Storage Service (S3) for data security and file version control. All data undergo quality control processes at the generating site followed by DMC validation for quality assurance. The DMC maintains a controlled computing environment for data analysis and integration. Upon publication, the DMC deposits finalized datasets to public repositories. The DMC architecture provides resources and scientific expertise to accelerate translational discovery. Robust operations allow rapid sharing of results across the project team. Maintenance of data quality standards and public data deposition will further benefit the scientific community.}, } @article {pmid33946909, year = {2021}, author = {Fröhlich, P and Gelenbe, E and Fiołka, J and Chęciński, J and Nowak, M and Filus, Z}, title = {Smart SDN Management of Fog Services to Optimize QoS and Energy.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {9}, pages = {}, pmid = {33946909}, issn = {1424-8220}, support = {780139//Horizon 2020 Framework Programme/ ; }, abstract = {The short latency required by IoT devices that need to access specific services has led to the development of Fog architectures that can serve as a useful intermediary between IoT systems and the Cloud. However, the massive numbers of IoT devices that are being deployed raise concerns about the power consumption of such systems as the number of IoT devices and Fog servers increases. Thus, in this paper, we describe a software-defined network (SDN)-based control scheme for client-server interaction that constantly measures ongoing client-server response times and estimates network power consumption, in order to select connection paths that minimize a composite goal function, including both QoS and power consumption. The approach, using reinforcement learning with neural networks, has been implemented in a test-bed and is detailed in this paper. Experiments are presented that show the effectiveness of our proposed system in the presence of a time-varying workload of client-to-service requests, resulting in a reduction of power consumption of approximately 15% for an average response time increase of under 2%.}, } @article {pmid33941078, year = {2021}, author = {Ji, SS and German, CA and Lange, K and Sinsheimer, JS and Zhou, H and Zhou, J and Sobel, EM}, title = {Modern simulation utilities for genetic analysis.}, journal = {BMC bioinformatics}, volume = {22}, number = {1}, pages = {228}, pmid = {33941078}, issn = {1471-2105}, support = {R01 HG009120/NH/NIH HHS/United States ; P30 ES006694/ES/NIEHS NIH HHS/United States ; R01 HG006139/HG/NHGRI NIH HHS/United States ; R01 GM053275/NH/NIH HHS/United States ; T32 HG002536/HG/NHGRI NIH HHS/United States ; R35 GM141798/GM/NIGMS NIH HHS/United States ; K01 DK106116/DK/NIDDK NIH HHS/United States ; R01 HG006139/NH/NIH HHS/United States ; T32 HG002536/NH/NIH HHS/United States ; K01 DK106116/NH/NIH HHS/United States ; R01 GM053275/GM/NIGMS NIH HHS/United States ; }, mesh = {Aged ; *Cloud Computing ; Computer Simulation ; *Genetic Testing ; Humans ; Pedigree ; Phenotype ; }, abstract = {BACKGROUND: Statistical geneticists employ simulation to estimate the power of proposed studies, test new analysis tools, and evaluate properties of causal models.
Although there are existing trait simulators, there is ample room for modernization. For example, most phenotype simulators are limited to Gaussian traits or traits transformable to normality, while ignoring qualitative traits and realistic, non-normal trait distributions. Also, modern computer languages, such as Julia, that accommodate parallelization and cloud-based computing are now mainstream but rarely used in older applications. To meet the challenges of contemporary big studies, it is important for geneticists to adopt new computational tools.

RESULTS: We present TraitSimulation, an open-source Julia package that makes it trivial to quickly simulate phenotypes under a variety of genetic architectures. This package is integrated into our OpenMendel suite for easy downstream analyses. Julia was purpose-built for scientific programming and provides tremendous speed and memory efficiency, easy access to multi-CPU and GPU hardware, and to distributed and cloud-based parallelization. TraitSimulation is designed to encourage flexible trait simulation, including via the standard devices of applied statistics, generalized linear models (GLMs) and generalized linear mixed models (GLMMs). TraitSimulation also accommodates many study designs: unrelateds, sibships, pedigrees, or a mixture of all three. (Of course, for data with pedigrees or cryptic relationships, the simulation process must include the genetic dependencies among the individuals.) We consider an assortment of trait models and study designs to illustrate integrated simulation and analysis pipelines. Step-by-step instructions for these analyses are available in our electronic Jupyter notebooks on Github. These interactive notebooks are ideal for reproducible research.
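TraitSimulation itself is a Julia package; purely as a minimal, language-agnostic sketch of the GLM-based simulation idea described above, the following Python code simulates a qualitative (case/control) trait from genotypes under a logistic model. The effect sizes, allele frequency, and intercept are invented for illustration and are not taken from the package.

```python
import numpy as np

rng = np.random.default_rng(0)

def simulate_binary_trait(genotypes, betas, intercept=-1.0):
    """Simulate a binary trait under a logistic GLM.

    genotypes: (n, p) matrix of 0/1/2 allele counts
    betas:     (p,) per-variant effect sizes on the logit scale
    """
    eta = intercept + genotypes @ betas   # linear predictor
    prob = 1.0 / (1.0 + np.exp(-eta))     # inverse logit link
    return rng.binomial(1, prob)          # Bernoulli case/control outcomes

n, p = 1000, 5
G = rng.binomial(2, 0.3, size=(n, p)).astype(float)  # toy unrelated genotypes
beta = np.array([0.4, 0.0, -0.3, 0.2, 0.0])
y = simulate_binary_trait(G, beta)
print(y.mean())   # observed case fraction
```

A GLMM-style simulation would add a random effect drawn from a kinship-structured multivariate normal to the linear predictor, which is the device the abstract alludes to for pedigree data.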

CONCLUSION: The TraitSimulation package has three main advantages. (1) It leverages the computational efficiency and ease of use of Julia to provide extremely fast, straightforward simulation of even the most complex genetic models, including GLMs and GLMMs. (2) It can be operated entirely within, but is not limited to, the integrated analysis pipeline of OpenMendel. And finally (3), by allowing a wider range of more realistic phenotype models, TraitSimulation brings power calculations and diagnostic tools closer to what investigators might see in real-world analyses.}, } @article {pmid33938423, year = {2021}, author = {Pascual-Ferrá, P and Alperstein, N and Barnett, DJ}, title = {A Multi-platform Approach to Monitoring Negative Dominance for COVID-19 Vaccine-Related Information Online.}, journal = {Disaster medicine and public health preparedness}, volume = {}, number = {}, pages = {1-24}, pmid = {33938423}, issn = {1938-744X}, abstract = {OBJECTIVE: The aim of this study was to test the appearance of negative dominance in COVID-19 vaccine-related information and activity online. We hypothesized that if negative dominance appeared, it would be a reflection of peaks in adverse events related to the vaccine, that negative content would attract more engagement on social media than other vaccine-related posts, and posts referencing adverse events related to COVID-19 vaccination would have a higher average toxicity score.

METHODS: We collected data using Google Trends for search behavior, CrowdTangle for social media data, and Media Cloud for media stories, and compared them against the dates of key adverse events related to COVID-19. We used Communalytic to analyze the toxicity of social media posts by platform and topic.

RESULTS: While our first hypothesis was partially supported, with peaks in image and YouTube video searches driven by adverse events, we did not find negative dominance in other types of searches or in patterns of attention by news media or on social media.

CONCLUSION: We did not find evidence in our data to prove the negative dominance of adverse events related to COVID-19 vaccination on social media. Future studies should corroborate these findings and, if consistent, focus on explaining why this may be the case.}, } @article {pmid33937619, year = {2021}, author = {Hook, DW and Porter, SJ}, title = {Scaling Scientometrics: Dimensions on Google BigQuery as an Infrastructure for Large-Scale Analysis.}, journal = {Frontiers in research metrics and analytics}, volume = {6}, number = {}, pages = {656233}, pmid = {33937619}, issn = {2504-0537}, abstract = {Cloud computing has the capacity to transform many parts of the research ecosystem, from particular research areas to overall strategic decision making and policy. Scientometrics sits at the boundary between research and the decision-making, policy-making, and evaluation processes that underpin research. One of the biggest challenges in research policy and strategy is having access to data in a way that allows for analysis that can respond in an iterative way to inform decisions. Many decisions are based on "global" measures such as benchmark metrics that are hard to source and hence are often nonspecific or outdated. The use of cloud technologies may be promising in addressing this area of providing data for research strategy and policy decisions. A novel visualisation technique is introduced and used as a means to explore the potential for scaling scientometrics by democratising both access to data and compute capacity using the cloud.}, } @article {pmid33925902, year = {2021}, author = {Kim, WS}, title = {Progressive Traffic-Oriented Resource Management for Reducing Network Congestion in Edge Computing.}, journal = {Entropy (Basel, Switzerland)}, volume = {23}, number = {5}, pages = {}, pmid = {33925902}, issn = {1099-4300}, support = {NRF-2020R1G1A1014096//National Research Foundation of Korea/ ; }, abstract = {Edge computing can deliver network services with low latency and real-time processing by providing cloud services at the network edge. Edge computing has a number of advantages such as low latency, locality, and network traffic distribution, but the associated resource management has become a significant challenge because of its inherent hierarchical, distributed, and heterogeneous nature. Various cloud-based network services such as crowd sensing, hierarchical deep learning systems, and cloud gaming each have their own traffic patterns and computing requirements. To provide a satisfactory user experience for these services, resource management that comprehensively considers service diversity, client usage patterns, and network performance indicators is required. In this study, an algorithm that simultaneously considers computing resources and network traffic load when deploying servers that provide edge services is proposed. The proposed algorithm generates candidate deployments based on factors that affect traffic load, such as the number of servers, server location, and client mapping according to service characteristics and usage. A final deployment plan is then established using a partial vector bin packing scheme that considers both the generated traffic and computing resources in the network. 
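To make the partial vector bin packing step described in the Kim abstract above concrete, here is a hedged first-fit-decreasing sketch over two resource dimensions (generated traffic and computing load); the heuristic, item demands, and capacities are generic illustrations, not the paper's exact scheme.

```python
def vector_ffd(items, capacity):
    """First-fit-decreasing heuristic for 2-dimensional vector bin packing.

    items:    list of (traffic, cpu) demands, normalized to [0, 1]
    capacity: (traffic, cpu) capacity of one edge server
    Returns a list of bins (servers), each a list of item indices.
    """
    # Pack "bigger" items first: sort by total demand, descending.
    order = sorted(range(len(items)), key=lambda i: sum(items[i]), reverse=True)
    bins, loads = [], []
    for i in order:
        t, c = items[i]
        for b, (lt, lc) in enumerate(loads):
            # First bin with room in *both* dimensions wins.
            if lt + t <= capacity[0] and lc + c <= capacity[1]:
                bins[b].append(i)
                loads[b] = (lt + t, lc + c)
                break
        else:
            bins.append([i])          # open a new server
            loads.append((t, c))
    return bins

demands = [(0.4, 0.2), (0.3, 0.5), (0.2, 0.2), (0.6, 0.3), (0.1, 0.4)]
print(vector_ffd(demands, capacity=(1.0, 1.0)))
```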
The proposed algorithm is evaluated using several simulations that consider actual network service and device characteristics.}, } @article {pmid33924090, year = {2021}, author = {Chen, YY and Chen, MH and Chang, CM and Chang, FS and Lin, YH}, title = {A Smart Home Energy Management System Using Two-Stage Non-Intrusive Appliance Load Monitoring over Fog-Cloud Analytics Based on Tridium's Niagara Framework for Residential Demand-Side Management.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {8}, pages = {}, pmid = {33924090}, issn = {1424-8220}, support = {MOST 109-3116-F-006-017-CC2//Ministry of Science and Technology, Taiwan/ ; MOST 109-2221-E-027-121-MY2//Ministry of Science and Technology, Taiwan/ ; O01109E048 (Industry-Academia Collaboration Project)//First International Computer, Inc. (FIC), Taiwan/ ; }, abstract = {Electricity is a vital resource for various human activities, supporting customers' lifestyles in today's modern technologically driven society. Effective demand-side management (DSM) can alleviate ever-increasing electricity demands that arise from customers in downstream sectors of a smart grid. Compared with traditional energy management systems, non-intrusive appliance load monitoring (NIALM) monitors relevant electrical appliances in a non-intrusive manner. Fog (edge) computing addresses the need to capture, process and analyze data generated and gathered by Internet of Things (IoT) end devices, and is an advanced IoT paradigm for applications in which resources, such as computing capability, of a central data center acting as cloud computing are placed at the edge of the network. The literature has yet to address NIALM developed over fog-cloud computing and conducted as part of a home energy management system (HEMS). In this study, a Smart HEMS prototype based on Tridium's Niagara Framework[®] has been established over fog (edge)-cloud computing, where NIALM as an IoT application in energy management has also been investigated in the framework. The SHEMS prototype established over fog-cloud computing in this study utilizes an artificial neural network-based NIALM approach to non-intrusively monitor relevant electrical appliances without an intrusive deployment of plug-load power meters (smart plugs), where a two-stage NIALM approach is completed. The core entity of the SHEMS prototype is based on a compact, cognitive, embedded IoT controller that connects IoT end devices, such as sensors and meters, and serves as a gateway in a smart house/smart building for residential DSM. As demonstrated and reported in this study, the established SHEMS prototype using the investigated two-stage NIALM approach is feasible and usable.}, } @article {pmid33923182, year = {2021}, author = {Ghaleb, M and Azzedin, F}, title = {Towards Scalable and Efficient Architecture for Modeling Trust in IoT Environments.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {9}, pages = {}, pmid = {33923182}, issn = {1424-8220}, support = {13-INF2452-04//King Abdulaziz City for Science and Technology/ ; }, abstract = {The Internet of Services (IoS) is gaining ground where cloud environments are utilized to create, subscribe, publish, and share services. The fast and significant evolution of IoS is affecting various aspects of people's lives and is enabling a wide spectrum of services and applications ranging from smart e-health, smart homes, to smart surveillance. Building trusted IoT environments is of great importance to achieve the full benefits of IoS.
In addition, building trusted IoT environments mitigates unrecoverable and unexpected damages in order to create reliable, efficient, stable, and flexible smart IoS-driven systems. Therefore, ensuring trust will provide the confidence and belief that IoT devices, and consequently IoS, behave as expected. Before hosting trust models, a suitable architecture for Fog computing is needed to provide scalability, fast data access, simple and efficient intra-communication, load balancing, decentralization, and availability. In this article, we propose a scalable and efficient Chord-based horizontal architecture. We also show how trust modeling can be mapped to our proposed architecture. Extensive experiments have been conducted to evaluate the performance and feasibility of our proposed architecture and to verify its behavior.}, } @article {pmid33922893, year = {2021}, author = {Kim, T and Yoo, SE and Kim, Y}, title = {Edge/Fog Computing Technologies for IoT Infrastructure.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {9}, pages = {}, pmid = {33922893}, issn = {1424-8220}, abstract = {The prevalence of smart devices and cloud computing has led to an explosion in the amount of data generated by IoT devices [...].}, } @article {pmid33922751, year = {2021}, author = {Masip-Bruin, X and Marín-Tordera, E and Sánchez-López, S and Garcia, J and Jukan, A and Juan Ferrer, A and Queralt, A and Salis, A and Bartoli, A and Cankar, M and Cordeiro, C and Jensen, J and Kennedy, J}, title = {Managing the Cloud Continuum: Lessons Learnt from a Real Fog-to-Cloud Deployment.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {9}, pages = {}, pmid = {33922751}, issn = {1424-8220}, support = {730929//H2020 LEIT Information and Communication Technologies/ ; }, abstract = {The wide adoption of the recently coined fog and edge computing paradigms alongside conventional cloud computing creates a novel scenario, known as the cloud continuum, where services may benefit from the overall set of resources to optimize their execution. To operate successfully, such a cloud continuum scenario demands novel management strategies, enabling a coordinated and efficient management of the entire set of resources, from the edge up to the cloud, designed in particular to address key edge characteristics, such as mobility, heterogeneity and volatility. The design of such a management framework poses many research challenges and has already promoted many initiatives worldwide at different levels. In this paper we present the results of one of these experiences driven by an EU H2020 project, focusing on the lessons learnt from a real deployment of the proposed management solution in three different industrial scenarios. We think that such a description may help understand the benefits brought in by a holistic cloud continuum management and also may help other initiatives in their design and development processes.}, } @article {pmid33922709, year = {2021}, author = {Huang, W and Zhou, J and Zhang, D}, title = {On-the-Fly Fusion of Remotely-Sensed Big Data Using an Elastic Computing Paradigm with a Containerized Spark Engine on Kubernetes.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {9}, pages = {}, pmid = {33922709}, issn = {1424-8220}, abstract = {Remotely-sensed satellite image fusion is indispensable for the generation of long-term gap-free Earth observation data.
While cloud computing (CC) provides the big picture for RS big data (RSBD), the fundamental question of the efficient fusion of RSBD on CC platforms has not yet been settled. To this end, we propose a lightweight cloud-native framework for the elastic processing of RSBD in this study. With the scaling mechanisms provided by both the Infrastructure as a Service (IaaS) and Platform as a Service (PaaS) layers of CC, the Spark-on-Kubernetes operator model running in the framework can enhance the efficiency of Spark-based algorithms without bottlenecks such as task latency caused by an unbalanced workload, and can ease the burden of tuning the performance parameters of parallel algorithms. Internally, we propose a task scheduling mechanism (TSM) to dynamically change the Spark executor pods' affinities to the computing hosts. The TSM learns the workload of a computing host. Learning from the ratio between the number of completed and failed tasks on a computing host, the TSM dispatches Spark executor pods to newer and less-overwhelmed computing hosts. To illustrate the advantage, we implement a parallel enhanced spatial and temporal adaptive reflectance fusion model (PESTARFM) to enable the efficient fusion of big RS images with a Spark aggregation function. We construct an OpenStack cloud computing environment to test the usability of the framework. According to the experiments, the TSM can improve the performance of the PESTARFM using only PaaS scaling by about 11.7%. When using both IaaS and PaaS scaling, the maximum performance gain with the TSM can even exceed 13.6%. The fusion of such big Sentinel and PlanetScope images requires less than 4 min in the experimental environment.}, } @article {pmid33921505, year = {2021}, author = {Vidana Morales, RY and Ortega Cisneros, S and Camacho Perez, JR and Sandoval Ibarra, F and Casas Carrillo, R}, title = {3D Simulation-Based Acoustic Wave Resonator Analysis and Validation Using Novel Finite Element Method Software.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {8}, pages = {}, pmid = {33921505}, issn = {1424-8220}, abstract = {This work illustrates the analysis of Film Bulk Acoustic Resonators (FBAR) using 3D Finite Element (FEM) simulations with the software OnScale in order to predict and improve resonator performance and quality before manufacturing. This kind of analysis minimizes manufacturing cycles by reducing design time with 3D simulations running on High-Performance Computing (HPC) cloud services. It also enables the identification of manufacturing effects on device performance. The simulation results are compared and validated with a manufactured FBAR device, previously reported, to further highlight the usefulness and advantages of the 3D simulations-based design process. In the 3D simulation results, some analysis challenges, like boundary condition definitions, mesh tuning, loss source tracing, and device quality estimations, were studied.
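As a toy sketch of the TSM idea described in the Huang et al. abstract above, where executor pods are dispatched toward hosts with a better completed-to-failed task ratio, consider the following; the scoring rule and the optimistic default for unseen hosts are assumptions for illustration, not the paper's implementation.

```python
def pick_host(stats):
    """Choose a computing host for the next Spark executor pod.

    stats: {host: (completed, failed)} task counters per host.
    A higher completion ratio is used as a proxy for a less-overwhelmed
    host; hosts with no history get an optimistic default score.
    """
    def score(host):
        done, failed = stats[host]
        total = done + failed
        return done / total if total else 1.0   # optimistic for new hosts
    return max(stats, key=score)

print(pick_host({"node-a": (95, 5), "node-b": (60, 40), "node-c": (0, 0)}))
# "node-c" wins: no history, so it gets the optimistic default of 1.0
```

The optimistic default mirrors the abstract's preference for dispatching pods to newer hosts, since a freshly joined node has no failure history to count against it.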
Hence, modern FEM solvers such as OnScale enable unprecedented FBAR analysis and design optimization.}, } @article {pmid33920249, year = {2021}, author = {Ye, Z and Yan, G and Wei, Y and Zhou, B and Li, N and Shen, S and Wang, L}, title = {Real-Time and Efficient Traffic Information Acquisition via Pavement Vibration IoT Monitoring System.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {8}, pages = {}, pmid = {33920249}, issn = {1424-8220}, support = {Z191100008019002//Beijing major science and technology projects/ ; }, abstract = {Traditional road-embedded monitoring systems for traffic monitoring have the disadvantages of a short life, high energy consumption and data redundancy, resulting in insufficient durability and high cost. In order to improve the durability and efficiency of the road-embedded monitoring system, a pavement vibration monitoring system is developed based on the Internet of things (IoT). The system includes multi-acceleration sensing nodes, a gateway, and a cloud platform. The key design principles and technologies of each part of the system are proposed, which provides valuable experience for the application of IoT monitoring technology in road infrastructures. Characterized by low power consumption, distributed computing, and high extensibility properties, the pavement vibration IoT monitoring system can realize the monitoring, transmission, and analysis of pavement vibration signals, and acquire real-time traffic information. This road-embedded system improves the intellectual capacity of road infrastructure and is conducive to the construction of a new generation of smart roads.}, } @article {pmid33920075, year = {2021}, author = {Molina-Molina, JC and Salhaoui, M and Guerrero-González, A and Arioua, M}, title = {Autonomous Marine Robot Based on AI Recognition for Permanent Surveillance in Marine Protected Areas.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {8}, pages = {}, pmid = {33920075}, issn = {1424-8220}, support = {PROYECTO VIGIA: SISTEMA DE VIGILANCIA COSTERO BASADO EN VEHÍCULOS AUTÓNOMOS DE SUPERFICIE.//Consejería de Agua, Agricultura, Ganadería, Pesca y Medio Ambiente, Comunidad Autónoma de la Región de Murcia/ ; PROYECTO OOMMUR: OBSERVATORIO OCEANOGRÁFICO MÓVIL DE LA REGIÓN DE MURCIA//Consejería de Empleo, Universidades, Empresa y Medio Ambiente, Dirección General de Comercio, Consumo y Simplificación administrativa/ ; }, mesh = {Artificial Intelligence ; Biodiversity ; Conservation of Natural Resources ; *Ecosystem ; Humans ; Oceans and Seas ; *Robotics ; }, abstract = {The world's oceans are one of the most valuable sources of biodiversity and resources on the planet, although there are areas where the marine ecosystem is threatened by human activities. Marine protected areas (MPAs) are distinctive spaces protected by law due to their unique characteristics, such as being the habitat of endangered marine species. Even with this protection, there are still illegal activities such as poaching or anchoring that threaten the survival of different marine species. In this context, we propose an autonomous surface vehicle (ASV) model system for the surveillance of marine areas by detecting and recognizing vessels through artificial intelligence (AI)-based image recognition services, in search of those carrying out illegal activities. Cloud and edge AI computing technologies were used for computer vision.
These technologies have proven to be accurate and reliable in detecting shapes and objects for which they have been trained. Azure edge and cloud vision services offer the best option in terms of accuracy for this task. Due to the lack of 4G and 5G coverage in offshore marine environments, it is necessary to use radio links with a coastal base station to ensure communications, which may result in a high response time due to the high latency involved. The analysis of on-board images may not be sufficiently accurate; therefore, we proposed a smart algorithm for autonomy optimization by selecting the proper AI technology according to the current scenario (SAAO), capable of selecting the best AI source in real time according to the required recognition accuracy or latency. The SAAO optimizes the execution, efficiency, risk reduction, and results of each stage of the surveillance mission, taking appropriate decisions by selecting either cloud or edge vision models without human intervention.}, } @article {pmid33919222, year = {2021}, author = {Dos Anjos, JCS and Gross, JLG and Matteussi, KJ and González, GV and Leithardt, VRQ and Geyer, CFR}, title = {An Algorithm to Minimize Energy Consumption and Elapsed Time for IoT Workloads in a Hybrid Architecture.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {9}, pages = {}, pmid = {33919222}, issn = {1424-8220}, support = {J118//Consejería De Economía Y Empleo: System for simulation and training in advanced techniques for the occupational risk prevention through the design of hybrid-reality environments with ref. J118/ ; }, abstract = {Advances in communication technologies have enabled the interaction of small devices, such as smartphones, wearables, and sensors, scattered across the Internet, bringing a whole new set of complex applications with ever greater task processing needs. These Internet of things (IoT) devices run on batteries with strict energy restrictions. They tend to offload task processing to remote servers, usually to cloud computing (CC) in datacenters geographically located away from the IoT device. In such a context, this work proposes a dynamic cost model to minimize energy consumption and task processing time for IoT scenarios in mobile edge computing environments. Our approach allows for a detailed cost model, with an algorithm called TEMS that considers energy, time consumed during processing, the cost of data transmission, and energy in idle devices. The task scheduler chooses among the cloud, a mobile edge computing (MEC) server, or local IoT devices to achieve better execution time at lower cost. The simulated environment evaluation saved up to 51.6% of energy consumption and improved task completion time by up to 86.6%.}, } @article {pmid33918614, year = {2021}, author = {Kyung, Y}, title = {Prioritized Task Distribution Considering Opportunistic Fog Computing Nodes.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {8}, pages = {}, pmid = {33918614}, issn = {1424-8220}, abstract = {As service latency and core network load relate to performance issues in the conventional cloud-based computing environment, the fog computing system has gained a lot of interest. However, since the load can be concentrated on specific fog computing nodes because of spatial and temporal service characteristics, performance degradation can occur, resulting in quality of service (QoS) degradation, especially for delay-sensitive services.
Therefore, this paper proposes a prioritized task distribution scheme, which considers static as well as opportunistic fog computing nodes according to their mobility feature. Based on the requirements of offloaded tasks, the proposed scheme supports delay-sensitive task processing at the static fog node and handles delay-insensitive tasks by means of opportunistic fog nodes for task distribution. To assess the performance of the proposed scheme, we develop an analytic model for the service response delay. Extensive simulation results are given to validate the analytic model and to show the performance of the proposed scheme, compared to conventional schemes, in terms of service response delay and outage probability.}, } @article {pmid33918443, year = {2021}, author = {Zhang, P and Zhang, M and Liu, J}, title = {Real-Time HD Map Change Detection for Crowdsourcing Update Based on Mid-to-High-End Sensors.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {7}, pages = {}, pmid = {33918443}, issn = {1424-8220}, abstract = {Continuous maintenance and real-time update of high-definition (HD) maps is a big challenge. With the development of autonomous driving, more and more vehicles are equipped with a variety of advanced sensors and a powerful computing platform. Based on mid-to-high-end sensors including an industry camera, a high-end Global Navigation Satellite System (GNSS)/Inertial Measurement Unit (IMU), and an onboard computing platform, a real-time HD map change detection method for crowdsourcing update is proposed in this paper. First, a mature commercial integrated navigation product is directly used to achieve a self-positioning accuracy of 20 cm on average. Second, an improved network based on BiSeNet is utilized for real-time semantic segmentation. It achieves the result of 83.9% IOU (Intersection over Union) on Nvidia Pegasus at 31 FPS. Third, a visual Simultaneous Localization and Mapping (SLAM) associated with pixel type information is performed to obtain the semantic point cloud data of features such as lane dividers, road markings, and other static objects. Finally, the semantic point cloud data is vectorized after denoising and clustering, and the results are matched with a pre-constructed HD map to confirm map elements that have not changed and generate new elements when they appear. The experiment conducted in Beijing shows that the method proposed is effective for crowdsourcing update of HD maps.}, } @article {pmid33918246, year = {2021}, author = {Chang, SC and Lu, MT and Pan, TH and Chen, CS}, title = {Evaluating the E-Health Cloud Computing Systems Adoption in Taiwan's Healthcare Industry.}, journal = {Life (Basel, Switzerland)}, volume = {11}, number = {4}, pages = {}, pmid = {33918246}, issn = {2075-1729}, abstract = {Although the electronic health (e-health) cloud computing system is a promising innovation, its adoption in the healthcare industry has been slow. This study investigated the adoption of e-health cloud computing systems in the healthcare industry and considered security functions, management, cloud service delivery, and cloud software for e-health cloud computing systems. Although numerous studies have determined factors affecting e-health cloud computing systems, few comprehensive reviews of factors and their relations have been conducted.
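A minimal sketch of the prioritized task distribution idea from the Kyung abstract above: delay-sensitive tasks stay on the static fog node while capacity lasts, and delay-insensitive tasks go to opportunistic nodes. The 100 ms sensitivity cutoff and the slot counting are invented for illustration, not parameters from the paper.

```python
def assign(tasks, static_free_slots):
    """Route offloaded tasks to fog nodes by delay sensitivity.

    tasks: list of (task_id, deadline_ms); tighter deadlines are treated
    as delay-sensitive and kept on the static fog node while it has
    capacity, the rest are delegated to opportunistic (mobile) fog nodes.
    """
    plan = {}
    for task_id, deadline in sorted(tasks, key=lambda t: t[1]):
        if deadline < 100 and static_free_slots > 0:
            plan[task_id] = "static-fog"
            static_free_slots -= 1
        else:
            plan[task_id] = "opportunistic-fog"
    return plan

print(assign([("t1", 40), ("t2", 500), ("t3", 80)], static_free_slots=1))
# {'t1': 'static-fog', 't3': 'opportunistic-fog', 't2': 'opportunistic-fog'}
```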
Therefore, this study investigated the relations between the factors affecting e-health cloud computing systems by using a multiple criteria decision-making technique, in which decision-making trial and evaluation laboratory (DEMATEL), DANP (DEMATEL-based Analytic Network Process), and modified VIKOR (VlseKriterijumska Optimizacija I Kompromisno Resenje) approaches were combined. The intended level of adoption of an e-health cloud computing system could be determined by using the proposed approach. The results of a case study performed on the Taiwanese healthcare industry indicated that the cloud management function must be primarily enhanced and that cost effectiveness is the most significant factor in the adoption of e-health cloud computing. This result is valuable for allocating resources to decrease performance gaps in the Taiwanese healthcare industry.}, } @article {pmid33916901, year = {2021}, author = {Cafuta, D and Dodig, I and Cesar, I and Kramberger, T}, title = {Developing a Modern Greenhouse Scientific Research Facility-A Case Study.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {8}, pages = {}, pmid = {33916901}, issn = {1424-8220}, support = {IP-2019-04//Hrvatska Zaklada za Znanost/ ; }, mesh = {*Algorithms ; *Artificial Intelligence ; *Greenhouse Effect ; Reproducibility of Results ; }, abstract = {Multidisciplinary approaches in science are still rare, especially in completely different fields such as agronomy science and computer science. We aim to create a state-of-the-art floating ebb and flow system greenhouse that can be used in future scientific experiments. The objective is to create a self-sufficient greenhouse with sensors, cloud connectivity, and artificial intelligence for real-time data processing and decision making. We investigated various approaches and proposed an optimal solution that can be used in future research on plant growth in floating ebb and flow systems. A novel microclimate pocket-detection solution is proposed using an automatically guided suspended platform sensor system. Furthermore, we propose a methodology for replacing sensor data knowledge with artificial intelligence for plant health estimation. Plant health estimation allows longer ebb periods and increases the nutrient level in the final product. With intelligent design and the use of artificial intelligence algorithms, we will reduce the cost of plant research and increase the usability and reliability of research data. Thus, our newly developed greenhouse would be more suitable for plant growth research and production.}, } @article {pmid33916818, year = {2021}, author = {Alenizi, F and Rana, O}, title = {Dynamically Controlling Offloading Thresholds in Fog Systems.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {7}, pages = {}, pmid = {33916818}, issn = {1424-8220}, abstract = {Fog computing is a potential solution to overcome the shortcomings of cloud-based processing of IoT tasks. These drawbacks can include high latency, poor location awareness, and security issues, attributed to the distance between IoT devices and cloud-hosted servers. Although fog computing has evolved as a solution to address these challenges, it is known for having limited resources that need to be effectively utilized, or its advantages could be lost. Computational offloading and resource management are critical to be able to benefit from fog computing systems. We introduce a dynamic, online, offloading scheme that involves the execution of delay-sensitive tasks.
This paper proposes an architecture of a fog node able to adjust its offloading threshold dynamically (i.e., the criteria by which a fog node decides whether tasks should be offloaded rather than executed locally) using two algorithms: dynamic task scheduling (DTS) and dynamic energy control (DEC). These algorithms seek to minimize overall delay, maximize throughput, and minimize energy consumption at the fog layer. Compared to other benchmarks, our approach could reduce latency by up to 95%, improve throughput by 71%, and reduce energy consumption by up to 67% in fog nodes.}, } @article {pmid33916120, year = {2021}, author = {Kelly, C and Pitropakis, N and Mylonas, A and McKeown, S and Buchanan, WJ}, title = {A Comparative Analysis of Honeypots on Different Cloud Platforms.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {7}, pages = {}, pmid = {33916120}, issn = {1424-8220}, support = {952672//Horizon 2020 Framework Programme/ ; }, abstract = {In 2019, the majority of companies used at least one cloud computing service and it is expected that by the end of 2021, cloud data centres will process 94% of workloads. The financial and operational advantages of moving IT infrastructure to specialised cloud providers are clearly compelling. However, with such volumes of private and personal data being stored in cloud computing infrastructures, security concerns have risen. Motivated to monitor and analyze adversarial activities, we deploy multiple honeypots on the popular cloud providers, namely Amazon Web Services (AWS), Google Cloud Platform (GCP) and Microsoft Azure, and operate them in multiple regions. Logs were collected over a period of three weeks in May 2020 and then comparatively analysed, evaluated and visualised. Our work revealed heterogeneous attackers' activity on each cloud provider, both when one considers the volume and origin of attacks, as well as the targeted services and vulnerabilities. Our results highlight the attempt of threat actors to abuse popular services, which were widely used during the COVID-19 pandemic for remote working, such as remote desktop sharing. Furthermore, the attacks seem to originate not only from countries that are commonly found to be the source of attacks, such as China, Russia and the United States, but also from uncommon ones such as Vietnam, India and Venezuela. Our results provide insights into the adversarial activity during our experiments, which can be used to inform the Situational Awareness operations of an organisation.}, } @article {pmid33908021, year = {2021}, author = {Katz, JE}, title = {Deploying Mass Spectrometric Data Analysis in the Amazon AWS Cloud Computing Environment.}, journal = {Methods in molecular biology (Clifton, N.J.)}, volume = {2271}, number = {}, pages = {375-397}, doi = {10.1007/978-1-0716-1241-5_26}, pmid = {33908021}, issn = {1940-6029}, mesh = {*Cloud Computing ; Information Dissemination ; *Mass Spectrometry ; Research Design ; Software ; Workflow ; }, abstract = {There are many advantages to deploying a mass spectrometry workflow to the cloud. While "cloud computing" can have many meanings, in this case, I am simply referring to a virtual computer that is remotely accessible over the Internet. This "computer" can have as many or as few resources (CPU, RAM, disk space, etc.) as your demands require, and those resources can be changed as you need without requiring complete reinstalls. Systems can be easily "checkpointed" and restored.
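The Katz chapter abstract above notes that a cloud machine's resources can be changed without a reinstall; a hedged boto3 sketch of that stop-resize-start cycle follows, using a hypothetical instance ID and an illustrative target instance type.

```python
import boto3

ec2 = boto3.client("ec2", region_name="us-east-1")
instance_id = "i-0123456789abcdef0"   # hypothetical instance ID

# Resize an analysis machine between sessions: stop it, change the
# instance type, then start it again. The root volume (and thus all
# installed software) is preserved, so no reinstall is required.
ec2.stop_instances(InstanceIds=[instance_id])
ec2.get_waiter("instance_stopped").wait(InstanceIds=[instance_id])
ec2.modify_instance_attribute(
    InstanceId=instance_id,
    InstanceType={"Value": "r5.4xlarge"},  # scale up RAM/CPU for a big search
)
ec2.start_instances(InstanceIds=[instance_id])
```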
I will describe how to deploy virtualized, remotely accessible computers on which you can perform your basic mass spectrometry data analysis. This use is a quite restricted microcosm of what is available under the umbrella of "cloud computing" but it is also the (useful!) niche use for which straightforward how-to documentation is lacking. This chapter is intended for people with little or no experience in creating cloud computing instances. Executing the steps in this chapter will empower you to instantiate a computer with the performance of your choosing, with preconfigured software already installed, using the Amazon Web Service (AWS) suite of tools. You can use this for use cases that span from needing limited access to high-end computing through giving your collaborators access to preconfigured computers to look at their data.}, } @article {pmid33907414, year = {2021}, author = {Ningrum, DNA and Yuan, SP and Kung, WM and Wu, CC and Tzeng, IS and Huang, CY and Li, JY and Wang, YC}, title = {Deep Learning Classifier with Patient's Metadata of Dermoscopic Images in Malignant Melanoma Detection.}, journal = {Journal of multidisciplinary healthcare}, volume = {14}, number = {}, pages = {877-885}, pmid = {33907414}, issn = {1178-2390}, abstract = {BACKGROUND: The incidence of skin cancer is one of the global burdens of malignancy, increasing each year, with melanoma being the deadliest. Imaging-based automated skin cancer detection remains challenging owing to variability in skin lesions and the limited availability of standard datasets. Recent research indicates the potential of deep convolutional neural networks (CNN) in predicting outcomes from simple as well as highly complicated images. However, their implementation requires high-end computational facilities, which are not feasible in low-resource and remote areas of health care. There is potential in combining images and patient metadata, but studies are still lacking.

OBJECTIVE: We aimed to develop a malignant melanoma detection model based on dermoscopic images and patients' metadata using an artificial intelligence (AI) model that will work on low-resource devices.

METHODS: We used an open-access dermatology repository, the International Skin Imaging Collaboration (ISIC) Archive dataset, consisting of 23,801 biopsy-proven dermoscopic images. We tested performance for binary classification of malignant vs nonmalignant melanomas. From 1200 sample images, we split the data into training (72%), validation (18%), and testing (10%) sets. We compared a CNN with image data only (CNN model) vs a CNN for image data combined with an artificial neural network (ANN) for the patient's metadata (CNN+ANN model).
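A minimal Keras sketch of the CNN+ANN fusion described in the METHODS above: a small convolutional branch for the dermoscopic image is concatenated with a dense branch for the patient's metadata before the final classifier. The layer sizes, image resolution, and the 8-feature metadata vector are assumptions for illustration, not the authors' architecture.

```python
import tensorflow as tf
from tensorflow.keras import layers

# Image branch (dermoscopic image)
img_in = layers.Input(shape=(128, 128, 3))
x = layers.Conv2D(16, 3, activation="relu")(img_in)
x = layers.MaxPooling2D()(x)
x = layers.Conv2D(32, 3, activation="relu")(x)
x = layers.GlobalAveragePooling2D()(x)

# Metadata branch (e.g., age, sex, lesion site encoded as 8 features)
meta_in = layers.Input(shape=(8,))
m = layers.Dense(16, activation="relu")(meta_in)

# Fuse both branches and classify malignant vs nonmalignant
z = layers.Concatenate()([x, m])
z = layers.Dense(32, activation="relu")(z)
out = layers.Dense(1, activation="sigmoid")(z)

model = tf.keras.Model(inputs=[img_in, meta_in], outputs=out)
model.compile(optimizer="adam", loss="binary_crossentropy",
              metrics=["accuracy"])
model.summary()
```

The metadata branch acts as a regularizing second signal, which is one plausible reading of why the combined model in the abstract overfits less than the image-only CNN.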

RESULTS: The balanced accuracy of the CNN+ANN model was higher (92.34%) than that of the CNN model (73.69%). Combining the patient's metadata via the ANN prevented the overfitting that occurs in the CNN model using dermoscopic images only. The small size (24 MB) of the model made it possible to run it on a medium-class computer without the need for cloud computing, making it suitable for deployment on devices with limited resources.

CONCLUSION: The CNN+ANN model can increase the accuracy of classification in malignant melanoma detection even with limited data and is promising for development as a screening device in remote and low-resource health care settings.}, } @article {pmid33892568, year = {2021}, author = {Wang, B and Liu, F}, title = {Task arrival based energy efficient optimization in smart-IoT data center.}, journal = {Mathematical biosciences and engineering : MBE}, volume = {18}, number = {3}, pages = {2713-2732}, doi = {10.3934/mbe.2021138}, pmid = {33892568}, issn = {1551-0018}, abstract = {With the growth and expansion of cloud data centers, energy consumption has become an urgent issue for smart city systems. However, most of the current resource management approaches focus on traditional cloud computing scheduling scenarios but fail to consider the features of workloads from Internet of Things (IoT) devices. In this paper, we analyze the characteristics of IoT requests and propose an improved Poisson task model with a novel mechanism to predict the arrivals of IoT requests. To achieve the trade-off between energy saving and service level agreement, we introduce an adaptive energy efficiency model to adjust the priority of the optimization objectives. Finally, an energy-efficient virtual machine scheduling algorithm is proposed to maximize the energy efficiency of the data center. The experimental results show that our strategy can achieve the best performance in comparison to other popular schemes.}, } @article {pmid33882012, year = {2021}, author = {Lee, H and Kang, J and Yeo, J}, title = {Medical Specialty Recommendations by an Artificial Intelligence Chatbot on a Smartphone: Development and Deployment.}, journal = {Journal of medical Internet research}, volume = {23}, number = {5}, pages = {e27460}, pmid = {33882012}, issn = {1438-8871}, mesh = {COVID-19/*epidemiology ; *Deep Learning ; Humans ; Pandemics ; Primary Health Care/*methods ; *Referral and Consultation ; SARS-CoV-2/isolation & purification ; *Smartphone ; Specialization ; Telemedicine/*methods ; }, abstract = {BACKGROUND: The COVID-19 pandemic has limited daily activities and even contact between patients and primary care providers. This makes it more difficult to provide adequate primary care services, which include connecting patients to an appropriate medical specialist. A smartphone-compatible artificial intelligence (AI) chatbot that classifies patients' symptoms and recommends the appropriate medical specialty could provide a valuable solution.

OBJECTIVE: In order to establish a contactless method of recommending the appropriate medical specialty, this study aimed to construct a deep learning-based natural language processing (NLP) pipeline and to develop an AI chatbot that can be used on a smartphone.

METHODS: We collected 118,008 sentences containing information on symptoms with labels (medical specialty), conducted data cleansing, and finally constructed a pipeline of 51,134 sentences for this study. Several deep learning models, including 4 different long short-term memory (LSTM) models with or without attention and with or without a pretrained FastText embedding layer, as well as bidirectional encoder representations from transformers for NLP, were trained and validated using a randomly selected test data set. The performance of the models was evaluated on the basis of the precision, recall, F1-score, and area under the receiver operating characteristic curve (AUC). An AI chatbot was also designed to make it easy for patients to use this specialty recommendation system. We used an open-source framework called "Alpha" to develop our AI chatbot. This takes the form of a web-based app with a frontend chat interface capable of conversing in text and a backend cloud-based server application to handle data collection, process the data with a deep learning model, and offer the medical specialty recommendation in a responsive web that is compatible with both desktops and smartphones.
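One of the model families named in the METHODS above, an LSTM classifier over an embedding layer, can be sketched in Keras as follows; the vocabulary size, sequence length, embedding dimension, and number of specialty classes are placeholders, and the embedding is randomly initialized here rather than loaded from pretrained FastText vectors as in the study.

```python
import tensorflow as tf
from tensorflow.keras import layers

VOCAB, MAXLEN, DIM, N_SPECIALTIES = 20000, 50, 300, 20  # illustrative sizes

model = tf.keras.Sequential([
    layers.Input(shape=(MAXLEN,)),
    # The paper would initialize this layer from pretrained FastText
    # vectors; random initialization is used here for brevity.
    layers.Embedding(VOCAB, DIM),
    layers.Bidirectional(layers.LSTM(128)),
    layers.Dropout(0.3),
    # One softmax output per candidate medical specialty
    layers.Dense(N_SPECIALTIES, activation="softmax"),
])
model.compile(optimizer="adam",
              loss="sparse_categorical_crossentropy",
              metrics=["accuracy"])
model.summary()
```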

RESULTS: The bidirectional encoder representations from transformers model yielded the best performance, with an AUC of 0.964 and an F1-score of 0.768, followed by the LSTM model with embedding vectors, with an AUC of 0.965 and an F1-score of 0.739. Considering the limitations of computing resources and the wide availability of smartphones, the LSTM model with embedding vectors trained on our data set was adopted for our AI chatbot service. We also deployed an Alpha version of the AI chatbot to be executed on both desktops and smartphones.

CONCLUSIONS: With the increasing need for telemedicine during the current COVID-19 pandemic, an AI chatbot with a deep learning-based NLP model that can recommend a medical specialty to patients through their smartphones would be exceedingly useful. This chatbot allows patients to identify the proper medical specialist in a rapid and contactless manner, based on their symptoms, thus potentially supporting both patients and primary care providers.}, } @article {pmid33879200, year = {2021}, author = {Youn, YC and Pyun, JM and Ryu, N and Baek, MJ and Jang, JW and Park, YH and Ahn, SW and Shin, HW and Park, KY and Kim, SY}, title = {Use of the Clock Drawing Test and the Rey-Osterrieth Complex Figure Test-copy with convolutional neural networks to predict cognitive impairment.}, journal = {Alzheimer's research & therapy}, volume = {13}, number = {1}, pages = {85}, pmid = {33879200}, issn = {1758-9193}, mesh = {Cognition ; *Cognitive Dysfunction/diagnosis ; Humans ; Mass Screening ; Neural Networks, Computer ; Neuropsychological Tests ; }, abstract = {BACKGROUND: The Clock Drawing Test (CDT) and Rey-Osterrieth Complex Figure Test (RCFT) are widely used as a part of neuropsychological test batteries to assess cognitive function. Our objective was to confirm the prediction accuracies of the RCFT-copy and CDT for cognitive impairment (CI) using convolutional neural network algorithms as a screening tool.

METHODS: The CDT and RCFT-copy data were obtained from patients aged 60-80 years who had more than 6 years of education. In total, 747 CDT and 980 RCFT-copy figures were utilized. Convolutional neural network algorithms using TensorFlow (ver. 2.3.0) on the Colab cloud platform (www.colab.research.google.com) were used for preprocessing and modeling. We measured the prediction accuracy of each drawing test 10 times using this dataset with the following classes: normal cognition (NC) vs. mildly impaired cognition (MI), NC vs. severely impaired cognition (SI), and NC vs. CI (MI + SI).

RESULTS: The accuracy of the CDT was better for differentiating MI (CDT, 78.04 ± 2.75; RCFT-copy, model not trained) and SI from NC (CDT, 91.45 ± 0.83; RCFT-copy, 90.27 ± 1.52); however, the RCFT-copy was better at predicting CI (CDT, 77.37 ± 1.77; RCFT-copy, 83.52 ± 1.41). The accuracy for a 3-way classification (NC vs. MI vs. SI) was approximately 71% for both tests; no significant difference was found between them.

CONCLUSIONS: The two drawing tests showed good performance for predicting severe impairment of cognition; however, a drawing test alone is not enough to predict overall CI. There are some limitations to our study: the sample size was small, not all participants performed both the CDT and RCFT-copy, and only the copy condition of the RCFT was used. Algorithms involving memory performance and longitudinal changes are worth future exploration. These results may contribute to improved home-based healthcare delivery.}, } @article {pmid33861767, year = {2021}, author = {Li, Y and Wei, J and Wu, B and Wang, C and Wang, C and Zhang, Y and Yang, X}, title = {Obfuscating encrypted threshold signature algorithm and its applications in cloud computing.}, journal = {PloS one}, volume = {16}, number = {4}, pages = {e0250259}, pmid = {33861767}, issn = {1932-6203}, mesh = {*Algorithms ; *Cloud Computing ; *Computer Security ; Humans ; *Privacy ; }, abstract = {Current cloud computing imposes serious restrictions on safeguarding users' data privacy. Since users' sensitive data is submitted in unencrypted forms to remote machines possessed and operated by untrusted service providers, users' sensitive data may be leaked by service providers. Program obfuscation offers unique advantages for cloud computing. In this paper, we construct an encrypted threshold signature functionality, which can securely outsource users' threshold signing rights to a cloud server by applying obfuscation, while revealing no further sensitive information. The obfuscator is proven to satisfy the average-case virtual black box property and to be existentially unforgeable under the decisional linear (DLIN) assumption and the computational Diffie-Hellman (CDH) assumption in the standard model. Moreover, we implement our scheme using the Java pairing-based cryptography library on a laptop.}, } @article {pmid33859193, year = {2021}, author = {von Chamier, L and Laine, RF and Jukkala, J and Spahn, C and Krentzel, D and Nehme, E and Lerche, M and Hernández-Pérez, S and Mattila, PK and Karinou, E and Holden, S and Solak, AC and Krull, A and Buchholz, TO and Jones, ML and Royer, LA and Leterrier, C and Shechtman, Y and Jug, F and Heilemann, M and Jacquemet, G and Henriques, R}, title = {Democratising deep learning for microscopy with ZeroCostDL4Mic.}, journal = {Nature communications}, volume = {12}, number = {1}, pages = {2276}, pmid = {33859193}, issn = {2041-1723}, support = {MR/K015826/1/MRC_/Medical Research Council/United Kingdom ; MC_UU_00012/1/MRC_/Medical Research Council/United Kingdom ; FC001999/CRUK_/Cancer Research UK/United Kingdom ; 203276/Z/16/Z/WT_/Wellcome Trust/United Kingdom ; 206670/Z/17/Z/WT_/Wellcome Trust/United Kingdom ; FC001999/MRC_/Medical Research Council/United Kingdom ; /WT_/Wellcome Trust/United Kingdom ; FC001999/ARC_/Arthritis Research UK/United Kingdom ; MR/T027924/1/MRC_/Medical Research Council/United Kingdom ; }, mesh = {Animals ; Cell Line, Tumor ; Cloud Computing ; Datasets as Topic ; *Deep Learning ; Humans ; Image Processing, Computer-Assisted/*methods ; Microscopy/*methods ; Primary Cell Culture ; Rats ; Software ; }, abstract = {Deep Learning (DL) methods are powerful analytical tools for microscopy and can outperform conventional image processing pipelines. Despite the enthusiasm and innovations fuelled by DL technology, the need to access powerful and compatible resources to train DL networks leads to an accessibility barrier that novice users often find difficult to overcome.
Here, we present ZeroCostDL4Mic, an entry-level platform simplifying DL access by leveraging the free, cloud-based computational resources of Google Colab. ZeroCostDL4Mic allows researchers with no coding expertise to train and apply key DL networks to perform tasks including segmentation (using U-Net and StarDist), object detection (using YOLOv2), denoising (using CARE and Noise2Void), super-resolution microscopy (using Deep-STORM), and image-to-image translation (using Label-free prediction - fnet, pix2pix and CycleGAN). Importantly, we provide suitable quantitative tools for each network to evaluate model performance, allowing model optimisation. We demonstrate the application of the platform to study multiple biological processes.}, } @article {pmid33840908, year = {2021}, author = {Meena, V and Gorripatti, M and Suriya Praba, T}, title = {Trust Enforced Computational Offloading for Health Care Applications in Fog Computing.}, journal = {Wireless personal communications}, volume = {119}, number = {2}, pages = {1369-1386}, pmid = {33840908}, issn = {0929-6212}, abstract = {The Internet of Things (IoT) is a network of internet-connected devices that generates huge amounts of data every day. The usage of IoT devices such as smart wearables and smartphones in smart cities is increasing linearly. Health care is one of the primary applications today that uses IoT devices. Data generated in this application may need computation, storage and data analytics operations, which require a resourceful environment for remote patient health monitoring. The data related to health care applications are primarily private and should be readily available to users. Enforcing these two constraints in a cloud environment is a hard task. Fog computing is an emergent architecture for providing computation, storage, control and network services within the user's proximity. To handle private data, the processing elements should be trustable entities in a Fog environment. In this paper we propose a novel Trust Enforced computation ofFLoading technique for trustworthy applications using fOg computiNg (TEFLON). The proposed system comprises two algorithms, namely an optimal service offloader and trust assessment, for addressing security and trust issues with reduced response time. The simulation results show that the proposed TEFLON framework improves the success rate of fog collaboration with reduced average latency for delay-sensitive applications and ensures trust for trustworthy applications.}, } @article {pmid33833714, year = {2021}, author = {Trenerry, B and Chng, S and Wang, Y and Suhaila, ZS and Lim, SS and Lu, HY and Oh, PH}, title = {Preparing Workplaces for Digital Transformation: An Integrative Review and Framework of Multi-Level Factors.}, journal = {Frontiers in psychology}, volume = {12}, number = {}, pages = {620766}, pmid = {33833714}, issn = {1664-1078}, abstract = {The rapid advancement of new digital technologies, such as smart technology, artificial intelligence (AI) and automation, robotics, cloud computing, and the Internet of Things (IoT), is fundamentally changing the nature of work and increasing concerns about the future of jobs and organizations. To keep pace with rapid disruption, companies need to update and transform business models to remain competitive. Meanwhile, the growth of advanced technologies is changing the types of skills and competencies needed in the workplace, demanding a shift in mindset among individuals, teams and organizations.
The recent COVID-19 pandemic has accelerated digitalization trends, while heightening the importance of employee resilience and well-being in adapting to widespread job and technological disruption. Although digital transformation is a new and urgent imperative, there is a long trajectory of rigorous research that can readily be applied to grasp these emerging trends. Recent studies and reviews of digital transformation have primarily focused on the business and strategic levels, with only modest integration of employee-related factors. Our review article seeks to fill these critical gaps by identifying and consolidating key factors important for an organization's overarching digital transformation. We reviewed studies across multiple disciplines and integrated the findings into a multi-level framework. At the individual level, we propose five overarching factors related to effective digital transformation among employees: technology adoption; perceptions and attitudes toward technological change; skills and training; workplace resilience and adaptability, and work-related wellbeing. At the group level, we identify three factors necessary for digital transformation: team communication and collaboration; workplace relationships and team identification, and team adaptability and resilience. Finally, at the organizational level, we propose three factors for digital transformation: leadership; human resources, and organizational culture/climate. Our review of the literature confirms that multi-level factors are important when planning for and embarking on digital transformation, thereby providing a framework for future research and practice.}, } @article {pmid33833303, year = {2021}, author = {Armgarth, A and Pantzare, S and Arven, P and Lassnig, R and Jinno, H and Gabrielsson, EO and Kifle, Y and Cherian, D and Arbring Sjöström, T and Berthou, G and Dowling, J and Someya, T and Wikner, JJ and Gustafsson, G and Simon, DT and Berggren, M}, title = {A digital nervous system aiming toward personalized IoT healthcare.}, journal = {Scientific reports}, volume = {11}, number = {1}, pages = {7757}, pmid = {33833303}, issn = {2045-2322}, abstract = {Body area networks (BANs), cloud computing, and machine learning are platforms that can potentially enable advanced healthcare outside the hospital. By applying distributed sensors and drug delivery devices on/in our body and connecting to such communication and decision-making technology, a system for remote diagnostics and therapy is achieved with additional autoregulation capabilities. Challenges with such autarchic on-body healthcare schemes relate to integrity and safety, and to the interfacing and transduction of electronic signals into biochemical signals, and vice versa. Here, we report a BAN, comprising flexible on-body organic bioelectronic sensors and actuators utilizing two parallel pathways for communication and decision-making. Data, recorded from strain sensors detecting body motion, are both securely transferred to the cloud for machine learning and improved decision-making, and sent through the body using a secure body-coupled communication protocol to auto-actuate delivery of neurotransmitters, all within seconds. We conclude that highly stable and accurate sensing, from multiple sensors, is needed to enable robust decision-making and limit the frequency of retraining.
The holistic platform resembles the self-regulatory properties of the nervous system, i.e., the ability to sense, communicate, decide, and react accordingly, thus operating as a digital nervous system.}, } @article {pmid33830059, year = {2021}, author = {Li, Y and Ye, H and Ye, F and Liu, Y and Lv, L and Zhang, P and Zhang, X and Zhou, Y}, title = {The Current Situation and Future Prospects of Simulators in Dental Education.}, journal = {Journal of medical Internet research}, volume = {23}, number = {4}, pages = {e23635}, pmid = {33830059}, issn = {1438-8871}, mesh = {Clinical Competence ; Computer Simulation ; Education, Dental ; Humans ; Software ; *Virtual Reality ; }, abstract = {The application of virtual reality has become increasingly extensive as this technology has developed. In dental education, virtual reality is mainly used to assist or replace traditional methods of teaching clinical skills in preclinical training for several subjects, such as endodontics, prosthodontics, periodontics, implantology, and dental surgery. The application of dental simulators in teaching can make up for the deficiency of traditional teaching methods and reduce the teaching burden, improving convenience for both teachers and students. However, because of the technological limitations of virtual reality and force feedback, dental simulators still have many hardware and software disadvantages that have prevented them from being an alternative to traditional dental simulators as a primary skill training method. In the future, when combined with big data, cloud computing, 5G, and deep learning technology, dental simulators will be able to give students individualized learning assistance, and their functions will be more diverse and suitable for preclinical training. The purpose of this review is to provide an overview of current dental simulators regarding related technologies, advantages and disadvantages, methods of evaluating effectiveness, and future directions for development.}, } @article {pmid33824714, year = {2021}, author = {Li, F and Qu, Z and Li, R}, title = {Medical Cloud Computing Data Processing to Optimize the Effect of Drugs.}, journal = {Journal of healthcare engineering}, volume = {2021}, number = {}, pages = {5560691}, pmid = {33824714}, issn = {2040-2309}, mesh = {*Cloud Computing ; Endothelial Cells ; Information Storage and Retrieval ; *Pharmaceutical Preparations ; Software ; }, abstract = {In recent years, cloud computing technology has been maturing as it grows. Hadoop originated from Apache Nutch and is an open-source cloud computing platform. Moreover, the platform is characterized by large scale, virtualization, strong stability, strong versatility, and support for scalability. Given the characteristics of unstructured medical images, it is necessary and far-reaching to combine content-based medical image retrieval with the Hadoop cloud platform. This study combines the impact mechanism of senile dementia vascular endothelial cells with cloud computing to construct a corresponding cloud computing image-set retrieval platform. Moreover, this study uses Hadoop's core distributed file system, HDFS, to upload images, stores the images in HDFS and the image feature vectors in HBase, and uses the MapReduce programming model to perform parallel retrieval, with the nodes cooperating with one another.
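The storage layout this abstract describes (images in HDFS, feature vectors in HBase, MapReduce for parallel retrieval) can be made concrete with a small sketch. The following is illustrative only, not the authors' code: it assumes a running HBase Thrift gateway, and the host, table, and column names are hypothetical. The happybase package is a commonly used Python client for HBase.

```python
import numpy as np
import happybase  # third-party Python client for the HBase Thrift gateway

conn = happybase.Connection('hbase-thrift-host')      # hypothetical host
conn.create_table('image_features', {'cf': dict()})   # one column family
table = conn.table('image_features')

vec = np.random.rand(64).astype(np.float32)           # stand-in CBIR feature vector
table.put(b'img-0001', {b'cf:vec': vec.tobytes(),
                        b'cf:hdfs_path': b'/images/img-0001.dcm'})

# A MapReduce retrieval job would scan partitions of this table in parallel;
# serially, the equivalent scan looks like:
for key, data in table.scan():
    stored = np.frombuffer(data[b'cf:vec'], dtype=np.float32)
```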
The results show that the proposed method is effective and can be applied in medical research.}, } @article {pmid33822891, year = {2021}, author = {Arisdakessian, CG and Nigro, OD and Steward, GF and Poisson, G and Belcaid, M}, title = {CoCoNet: an efficient deep learning tool for viral metagenome binning.}, journal = {Bioinformatics (Oxford, England)}, volume = {37}, number = {18}, pages = {2803-2810}, doi = {10.1093/bioinformatics/btab213}, pmid = {33822891}, issn = {1367-4811}, support = {1636402//National Science Foundation Division of Ocean Sciences/ ; 1557349-Ike Wai//Office of Integrative Activities/ ; 1736030-G2P//Securing Hawaii's Water Future/ ; }, mesh = {Metagenome ; Algorithms ; *Deep Learning ; Software ; *Microbiota/genetics ; Sequence Analysis, DNA/methods ; Metagenomics/methods ; }, abstract = {MOTIVATION: Metagenomic approaches hold the potential to characterize microbial communities and unravel the intricate link between the microbiome and biological processes. Assembly is one of the most critical steps in metagenomics experiments. It consists of transforming overlapping DNA sequencing reads into sufficiently accurate representations of the community's genomes. This process is computationally difficult and commonly results in genomes fragmented across many contigs. Computational binning methods are used to mitigate fragmentation by partitioning contigs based on their sequence composition, abundance or chromosome organization into bins representing the community's genomes. Existing binning methods have been principally tuned for bacterial genomes and do not perform favorably on viral metagenomes.
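As background for the composition-based binning described above, the sketch below computes the normalized k-mer frequency vector of a contig, the classic sequence-composition feature that such binners cluster on. It is a generic illustration, not CoCoNet's implementation (which additionally models coverage with neural networks).

```python
from collections import Counter
from itertools import product
import numpy as np

def kmer_composition(seq: str, k: int = 4) -> np.ndarray:
    """Return the normalized k-mer frequency vector of a contig."""
    kmers = [''.join(p) for p in product('ACGT', repeat=k)]
    index = {km: i for i, km in enumerate(kmers)}
    counts = Counter(seq[i:i + k] for i in range(len(seq) - k + 1))
    vec = np.zeros(len(kmers))
    for km, c in counts.items():
        if km in index:          # skip k-mers containing N or other ambiguity codes
            vec[index[km]] = c
    total = vec.sum()
    return vec / total if total else vec

print(kmer_composition('ACGTACGTAGCTAGCTAACG').shape)  # (256,) for k = 4
```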

RESULTS: We propose Composition and Coverage Network (CoCoNet), a new binning method for viral metagenomes that leverages the flexibility and the effectiveness of deep learning to model the co-occurrence of contigs belonging to the same viral genome and provide a rigorous framework for binning viral contigs. Our results show that CoCoNet substantially outperforms existing binning methods on viral datasets.

CoCoNet was implemented in Python and is available for download on PyPi (https://pypi.org/). The source code is hosted on GitHub at https://github.com/Puumanamana/CoCoNet and the documentation is available at https://coconet.readthedocs.io/en/latest/index.html. CoCoNet does not require extensive resources to run. For example, binning 100k contigs took about 4 h on 10 Intel CPU Cores (2.4 GHz), with a memory peak at 27 GB (see Supplementary Fig. S9). To process a large dataset, CoCoNet may need to be run on a high RAM capacity server. Such servers are typically available in high-performance or cloud computing settings.

SUPPLEMENTARY INFORMATION: Supplementary data are available at Bioinformatics online.}, } @article {pmid33817018, year = {2021}, author = {Iyer, TJ and Joseph Raj, AN and Ghildiyal, S and Nersisson, R}, title = {Performance analysis of lightweight CNN models to segment infectious lung tissues of COVID-19 cases from tomographic images.}, journal = {PeerJ. Computer science}, volume = {7}, number = {}, pages = {e368}, pmid = {33817018}, issn = {2376-5992}, abstract = {The pandemic of Coronavirus Disease-19 (COVID-19) has spread around the world, causing an existential health crisis. Automated detection of COVID-19 infections in the lungs from Computed Tomography (CT) images offers huge potential in tackling the problem of slow detection and augments the conventional diagnostic procedures. However, segmenting COVID-19 from CT scans is problematic due to high variation in the types of infections and low contrast between healthy and infected tissues. Fast and accurate results are required when segmenting lung CT scans for COVID-19; furthermore, due to the pandemic, much of the research community has opted for cloud-based servers such as Google Colab to develop their algorithms. High accuracy can be achieved using deep networks, but prediction time varies as resources are shared among many users, motivating a comparison of different lightweight segmentation models. To address this issue, we analyze the segmentation of COVID-19 using four Convolutional Neural Networks (CNNs). The images in our dataset are preprocessed to remove motion artifacts. The four networks are UNet, Segmentation Network (Seg Net), High-Resolution Network (HR Net) and VGG UNet. Trained on our dataset of more than 3,000 images, HR Net was found to be the best-performing network, achieving an accuracy of 96.24% and a Dice score of 0.9127. The analysis shows that lightweight CNN models perform better than other neural network models when segmenting infectious tissue due to COVID-19 from CT slices.}, } @article {pmid33817001, year = {2021}, author = {Rizwan Ali, M and Ahmad, F and Hasanain Chaudary, M and Ashfaq Khan, Z and Alqahtani, MA and Saad Alqurni, J and Ullah, Z and Khan, WU}, title = {Petri Net based modeling and analysis for improved resource utilization in cloud computing.}, journal = {PeerJ. Computer science}, volume = {7}, number = {}, pages = {e351}, pmid = {33817001}, issn = {2376-5992}, abstract = {The cloud is a shared pool of systems that provides multiple resources through the Internet, so users can access substantial computing power from their own computers. However, with the strong migration rate of multiple applications towards the cloud, more disks and servers are required to store the huge volumes of data. Most cloud storage service providers replicate full copies of data over multiple data centers to ensure data availability. Further, replication is not only costly but also wastes energy resources. Erasure codes, by contrast, reduce the storage cost by splitting data into n chunks and storing these chunks, together with k coded chunks, in n + k different data centers to tolerate k failures. However, extra computation is needed to regenerate a data object. Cache-A Replica On Modification (CAROM) is a hybrid file system that combines the benefits of replication and erasure codes to reduce access latency and bandwidth consumption. However, no formal analysis of CAROM that can validate its performance is available in the literature.
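To make the storage trade-off described in the preceding abstract concrete, the toy calculation below compares the raw-bytes-per-logical-byte overhead of full replication against an erasure code that splits data into n chunks plus k coded chunks; both configurations shown tolerate two failures. This is a worked illustration of the general arithmetic, not part of the CAROM analysis.

```python
def replication_overhead(copies: int) -> float:
    """Raw bytes stored per logical byte under full replication."""
    return float(copies)

def erasure_overhead(n: int, k: int) -> float:
    """Raw bytes stored per logical byte when data is split into n chunks
    plus k coded chunks spread over n + k data centers (tolerates k failures)."""
    return (n + k) / n

print(replication_overhead(3))  # 3.0 -> 200% extra storage, tolerates 2 failures
print(erasure_overhead(4, 2))   # 1.5 -> 50% extra storage, also tolerates 2 failures
```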
To address this issue, this research first presents a colored Petri net-based formal model of CAROM. The research proceeds by presenting a formal analysis and simulation to validate the performance of the proposed system. This paper contributes towards the utilization of resources in clouds by presenting a comprehensive formal analysis of CAROM.}, } @article {pmid33816176, year = {2021}, author = {Wan, KW and Wong, CH and Ip, HF and Fan, D and Yuen, PL and Fong, HY and Ying, M}, title = {Evaluation of the performance of traditional machine learning algorithms, convolutional neural network and AutoML Vision in ultrasound breast lesions classification: a comparative study.}, journal = {Quantitative imaging in medicine and surgery}, volume = {11}, number = {4}, pages = {1381-1393}, pmid = {33816176}, issn = {2223-4292}, abstract = {BACKGROUND: In recent years, applying artificial intelligence in the medical field, from computer-aided diagnosis (CAD) to patient prognosis prediction, has become increasingly popular. Given that not all healthcare professionals have the expertise required to develop a CAD system, the aim of this study was to investigate the feasibility of using AutoML Vision, a highly automated machine learning model, for future clinical applications by comparing it with commonly used CAD algorithms in the differentiation of benign and malignant breast lesions on ultrasound.

METHODS: A total of 895 breast ultrasound images were obtained from two online open-access breast ultrasound image datasets. Traditional machine learning models (comprising seven commonly used CAD algorithms) based on three extracted content-based radiomic features (Hu moments, color histogram, Haralick texture), as well as a convolutional neural network (CNN) model, were built in Python. AutoML Vision was trained on Google Cloud Platform. Sensitivity, specificity, F1 score and average precision (AUCPR) were used to evaluate the diagnostic performance of the models. Cochran's Q test was used to evaluate the statistical significance among all studied models, and the McNemar test was used as the post-hoc test for pairwise comparisons. The proposed AutoML model was also compared with current related studies involving similar medical imaging modalities for characterizing benign or malignant breast lesions.
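Two of the three radiomic descriptors named in METHODS (Hu moments and a color histogram) can be extracted with OpenCV roughly as sketched below. This is an illustration of the named features, not the study's code; the third descriptor, Haralick texture, is commonly obtained from mahotas.features.haralick.

```python
import cv2
import numpy as np

def extract_features(img: np.ndarray) -> np.ndarray:
    """Hu moments (7 shape descriptors) + 512-bin color histogram."""
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    hu = cv2.HuMoments(cv2.moments(gray)).flatten()
    hist = cv2.calcHist([img], [0, 1, 2], None, [8, 8, 8],
                        [0, 256, 0, 256, 0, 256])
    hist = cv2.normalize(hist, hist).flatten()
    return np.concatenate([hu, hist])

# Toy stand-in for an ultrasound frame; real use would load the image file.
demo = np.random.randint(0, 256, (128, 128, 3), dtype=np.uint8)
print(extract_features(demo).shape)  # (519,)
```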

RESULTS: There was a significant difference in the diagnostic performance among all studied traditional machine learning classifiers (P<0.05). Random Forest achieved the best performance in the differentiation of benign and malignant breast lesions (accuracy: 90%; sensitivity: 71%; specificity: 100%; F1 score: 0.83; AUCPR: 0.90), which was statistically comparable to the performance of CNN (accuracy: 91%; sensitivity: 82%; specificity: 96%; F1 score: 0.87; AUCPR: 0.88) and AutoML Vision (accuracy: 86%; sensitivity: 84%; specificity: 88%; F1 score: 0.83; AUCPR: 0.95) based on Cochran's Q test (P>0.05).
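The omnibus-then-post-hoc scheme used here (Cochran's Q across all classifiers, McNemar for pairwise follow-up) can be reproduced on per-image correctness indicators with statsmodels, as sketched below on toy data, not the study's data.

```python
import numpy as np
from statsmodels.stats.contingency_tables import cochrans_q, mcnemar

# Correct/incorrect indicators per test image for three classifiers (toy data).
rng = np.random.default_rng(0)
rf, cnn, automl = (rng.integers(0, 2, 100) for _ in range(3))

omnibus = cochrans_q(np.column_stack([rf, cnn, automl]))
print(f"Cochran's Q p-value: {omnibus.pvalue:.3f}")

# Post-hoc pairwise McNemar: 2x2 table of agreement/disagreement counts.
tab = np.array([[np.sum((rf == 1) & (cnn == 1)), np.sum((rf == 1) & (cnn == 0))],
                [np.sum((rf == 0) & (cnn == 1)), np.sum((rf == 0) & (cnn == 0))]])
print(f"McNemar p-value (RF vs CNN): {mcnemar(tab, exact=True).pvalue:.3f}")
```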

CONCLUSIONS: In this study, the performance of AutoML Vision was not significantly different from that of Random Forest (the best classifier among traditional machine learning models) and CNN. AutoML Vision showed relatively high accuracy, comparable to commonly used classifiers, which may support its future application in clinical practice.}, } @article {pmid33814966, year = {2021}, author = {Andreas, A and Mavromoustakis, CX and Mastorakis, G and Do, DT and Batalla, JM and Pallis, E and Markakis, EK}, title = {Towards an optimized security approach to IoT devices with confidential healthcare data exchange.}, journal = {Multimedia tools and applications}, volume = {80}, number = {20}, pages = {31435-31449}, pmid = {33814966}, issn = {1380-7501}, abstract = {Reliable data exchange and efficient image transfer are currently significant research challenges in health care systems. To incentivize data exchange within the Internet of Things (IoT) framework, we need to ensure data sovereignty by facilitating secure data exchange between trusted parties. The security and reliability of data-sharing infrastructure require a community of trust. Therefore, this paper introduces an encryption framework based on data fragmentation. It also presents a novel, deterministic grey-scale optical encryption scheme based on fundamental mathematics. The objective is to use encryption as the underlying measure to make the data unintelligible while exploiting fragmentation to break down sensitive relationships between attributes. Thus, sensitive data are distributed across separate data repositories, and decryption and reconstruction use interpolation, given knowledge of the polynomial coefficients and personal values from the database management system (DBMS). The scheme also aims to ensure the secure acquisition of diagnostic images, micrography, and all types of medical imagery based on probabilistic approaches. Visual sharing of confidential medical imagery is based on a novel method in which any set of k - 1 or fewer of the n transparencies cannot reveal the original image.}, } @article {pmid33808037, year = {2021}, author = {Mijuskovic, A and Chiumento, A and Bemthuis, R and Aldea, A and Havinga, P}, title = {Resource Management Techniques for Cloud/Fog and Edge Computing: An Evaluation Framework and Classification.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {5}, pages = {}, pmid = {33808037}, issn = {1424-8220}, abstract = {Processing IoT applications directly in the cloud may not be the most efficient solution for each IoT scenario, especially for time-sensitive applications. A promising alternative is to use fog and edge computing, which address the issue of managing the large data bandwidth needed by end devices. These paradigms require processing the large amounts of generated data close to the data sources rather than in the cloud. One of the considerations of cloud-based IoT environments is resource management, which typically revolves around resource allocation, workload balance, resource provisioning, task scheduling, and QoS to achieve performance improvements. In this paper, we review resource management techniques that can be applied for cloud, fog, and edge computing. The goal of this review is to provide an evaluation framework of metrics for resource management algorithms aiming at the cloud/fog and edge environments. To this end, we first address research challenges on resource management techniques in that domain.
We then classify current research contributions to support the construction of an evaluation framework. One of the main contributions is an overview and analysis of research papers addressing resource management techniques. In conclusion, this review highlights opportunities for using resource management techniques within the cloud/fog/edge paradigm; this practice is still in early development, and barriers remain to be overcome.}, } @article {pmid33807986, year = {2021}, author = {H Hasan, M and Abbasalipour, A and Nikfarjam, H and Pourkamali, S and Emad-Ud-Din, M and Jafari, R and Alsaleem, F}, title = {Exploiting Pull-In/Pull-Out Hysteresis in Electrostatic MEMS Sensor Networks to Realize a Novel Sensing Continuous-Time Recurrent Neural Network.}, journal = {Micromachines}, volume = {12}, number = {3}, pages = {}, pmid = {33807986}, issn = {2072-666X}, support = {1935641//National Science Foundation/ ; }, abstract = {The goal of this paper is to provide a novel computing approach that can be used to reduce the power consumption, size, and cost of wearable electronics. To achieve this goal, the use of microelectromechanical systems (MEMS) sensors for simultaneous sensing and computing is introduced. Specifically, by enabling sensing and computing locally at the MEMS sensor node and utilizing the usually unwanted pull-in/pull-out hysteresis, we may eliminate the need for cloud computing and reduce the use of analog-to-digital converters, sampling circuits, and digital processors. As a proof of concept, we show that a simulation model of a network of three commercially available MEMS accelerometers can classify a train of square and triangular acceleration signals inherently using pull-in and release hysteresis. Furthermore, we develop and fabricate a network with finger arrays of parallel plate actuators to facilitate coupling between MEMS devices in the network using actuating assemblies and biasing assemblies, thus bypassing the previously reported coupling challenge in MEMS neural networks.}, } @article {pmid33807759, year = {2021}, author = {Pintavirooj, C and Keatsamarn, T and Treebupachatsakul, T}, title = {Multi-Parameter Vital Sign Telemedicine System Using Web Socket for COVID-19 Pandemics.}, journal = {Healthcare (Basel, Switzerland)}, volume = {9}, number = {3}, pages = {}, pmid = {33807759}, issn = {2227-9032}, support = {2563-02-01-007//King Mongkut's Institute of Technology Ladkrabang/ ; }, abstract = {Telemedicine has become an increasingly important part of the modern healthcare infrastructure, especially in the present situation with the COVID-19 pandemic. Many cloud platforms have been used intensively for telemedicine. The most popular ones include PubNub, Amazon Web Service, Google Cloud Platform and Microsoft Azure. One of the crucial challenges of telemedicine is real-time vital-sign monitoring. These commercial platforms are, however, not well suited to real-time applications. The alternative is to design a web-based application exploiting Web Socket. This research paper concerns real-time six-parameter vital-sign monitoring using a web-based application. The six vital-sign parameters are electrocardiogram, temperature, plethysmogram, percent oxygen saturation, blood pressure and heart rate. The six vital-sign parameters were encoded on a web server and sent to a client site upon login; the encoded parameters were then decoded into the six vital-sign signals.
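As an aside, a minimal sketch of the server side of such a Web Socket vital-sign stream is shown below. The field names and simulated values are hypothetical, and the third-party websockets package stands in for whatever server stack the authors used; a real deployment would read the six signals from acquisition hardware rather than a random generator.

```python
import asyncio
import json
import random
import websockets  # third-party 'websockets' package (v10+ handler signature)

async def vitals(ws):
    """Push one six-parameter vital-sign frame per second to each client."""
    while True:
        frame = {p: random.random() for p in
                 ("ecg", "temp", "pleth", "spo2", "bp", "hr")}  # simulated signals
        await ws.send(json.dumps(frame))
        await asyncio.sleep(1)

async def main():
    async with websockets.serve(vitals, "0.0.0.0", 8765):
        await asyncio.Future()  # run forever

asyncio.run(main())
```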
Our proposed multi-parameter vital-sign telemedicine system using Web Socket successfully monitored the six-parameter vital signs remotely on a 4G mobile network with a latency of less than 5 milliseconds.}, } @article {pmid33806888, year = {2021}, author = {Kang, S and David, DSK and Yang, M and Yu, YC and Ham, S}, title = {Energy-Efficient Ultrasonic Water Level Detection System with Dual-Target Monitoring.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {6}, pages = {}, pmid = {33806888}, issn = {1424-8220}, support = {20STUTA26//U.S. Department of Transportation/ ; }, abstract = {This study presents an ultrasonic water level detection (UWLD) system with an energy-efficient design and dual-target monitoring. Water level monitoring with a non-contact sensor is a suitable method since the sensor is not directly exposed to water. In addition, a web-based monitoring system using a cloud computing platform is a well-known technique to provide real-time water level monitoring. However, the long-term stable operation of remotely communicating units is an issue for real-time water level monitoring. Therefore, this paper proposes a UWLD unit using a low-power consumption design for renewable energy harvesting (e.g., solar) by controlling the unit with dual microcontrollers (MCUs) to improve the energy efficiency of the system. In addition, dual targeting of the pavement and streamside is designed to monitor both urban inundation and stream overflow. The real-time water level monitoring data obtained from the proposed UWLD system are analyzed using the water level changing rate (WLCR) and a water level index. The quantified WLCR and water level index at various sampling rates present differing sensitivities to heavy rain.}, } @article {pmid33806770, year = {2021}, author = {Sergi, I and Montanaro, T and Benvenuto, FL and Patrono, L}, title = {A Smart and Secure Logistics System Based on IoT and Cloud Technologies.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {6}, pages = {}, pmid = {33806770}, issn = {1424-8220}, support = {TECNONIDI PROGRAMMA OPERATIVO REGIONALE 2014-2020 REGIONE PUGLIA//Regione Puglia/ ; }, abstract = {Recently, one of the hottest topics in the logistics sector has been the traceability of goods and the monitoring of their condition during transportation. Perishable goods, such as fresh food, have attracted particular attention from researchers, who have already proposed different solutions to guarantee the quality and freshness of food throughout the cold chain. In this regard, the use of Internet of Things (IoT)-enabling technologies and their specific branch called edge computing is bringing enhancements that achieve easy remote, real-time monitoring of transported goods. Given fast-changing requirements and the difficulties researchers can encounter in proposing new solutions, fast prototyping could help rapidly advance both the research and commercial sectors. To ease fast prototyping of solutions, different platforms and tools have been proposed in recent years; however, it is difficult to guarantee end-to-end security at all levels with such platforms.
For this reason, based on experiments reported in the literature and aiming to support fast prototyping with end-to-end security in the logistics sector, the current work presents a solution demonstrating how the Azure Sphere platform, a dedicated hardware device (the MT3620 microcontroller unit), and the Azure Sphere Security Service can be used to realize a fast prototype for tracing the condition of fresh food during its transportation. The proposed solution guarantees end-to-end security and can be exploited by future similar works in other sectors as well.}, } @article {pmid33805471, year = {2021}, author = {El-Rashidy, N and El-Sappagh, S and Islam, SMR and M El-Bakry, H and Abdelrazek, S}, title = {Mobile Health in Remote Patient Monitoring for Chronic Diseases: Principles, Trends, and Challenges.}, journal = {Diagnostics (Basel, Switzerland)}, volume = {11}, number = {4}, pages = {}, pmid = {33805471}, issn = {2075-4418}, abstract = {Chronic diseases are becoming more widespread. Treatment and monitoring of these diseases require going to hospitals frequently, which increases the burdens of hospitals and patients. Presently, advancements in wearable sensors and communication protocols are enriching the healthcare system in ways that will soon reshape healthcare services. Remote patient monitoring (RPM) is the foremost of these advancements. RPM systems are based on the collection of patient vital signs extracted using invasive and noninvasive techniques, and on sending them in real time to physicians. These data may help physicians make the right decision at the right time. The main objective of this paper is to outline research directions on remote patient monitoring, explain the role of AI in building RPM systems, and provide an overview of the state of the art of RPM, its advantages, its challenges, and its probable future directions. For studying the literature, five databases were chosen (ScienceDirect, IEEE Xplore, Springer, PubMed, and science.gov). We followed the Preferred Reporting Items for Systematic Reviews and Meta-Analyses (PRISMA), a standard methodology for systematic reviews and meta-analyses. A total of 56 articles are reviewed based on the combination of a set of selected search terms including RPM, data mining, clinical decision support system, electronic health record, cloud computing, internet of things, and wireless body area network. The results of this study confirmed the effectiveness of RPM in improving healthcare delivery, increasing diagnosis speed, and reducing costs.
To this end, we also present a chronic disease monitoring system as a case study to provide enhanced solutions for RPM.}, } @article {pmid33805187, year = {2021}, author = {Lovén, L and Lähderanta, T and Ruha, L and Peltonen, E and Launonen, I and Sillanpää, MJ and Riekki, J and Pirttikangas, S}, title = {EDISON: An Edge-Native Method and Architecture for Distributed Interpolation.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {7}, pages = {}, pmid = {33805187}, issn = {1424-8220}, support = {318927//Academy of Finland/ ; 877056//Electronic Components and Systems for European Leadership/ ; N/A//Infotech Oulu research institute/ ; N/A//Future Makers program of the Jane and Aatos Erkko Foundation and the Technology Industries of Finland Centennial Foundation/ ; personal grant for L.L.//Tauno Tönning foundation/ ; }, abstract = {Spatio-temporal interpolation provides estimates of observations in unobserved locations and time slots. In smart cities, interpolation helps to provide a fine-grained contextual and situational understanding of the urban environment, in terms of both short-term (e.g., weather, air quality, traffic) and long-term (e.g., crime, demographics) spatio-temporal phenomena. Various initiatives improve spatio-temporal interpolation results by including additional data sources such as vehicle-fitted sensors, mobile phones, or micro weather stations of, for example, smart homes. However, the underlying computing paradigm in such initiatives is predominantly centralized, with all data collected and analyzed in the cloud. This solution is not scalable, as when the spatial and temporal density of sensor data grows, the required transmission bandwidth and computational capacity become unfeasible. To address the scaling problem, we propose EDISON: algorithms for distributed learning and inference, and an edge-native architecture for distributing spatio-temporal interpolation models, their computations, and the observed data vertically and horizontally between device, edge and cloud layers. We demonstrate EDISON functionality in a controlled, simulated spatio-temporal setup with 1 M artificial data points. While the main motivation of EDISON is the distribution of the heavy computations, the results show that EDISON also provides an improvement over alternative approaches, reaching at best a 10% smaller RMSE than a global interpolation and 6% smaller RMSE than a baseline distributed approach.}, } @article {pmid33803561, year = {2021}, author = {Zhang, J and Lu, C and Cheng, G and Guo, T and Kang, J and Zhang, X and Yuan, X and Yan, X}, title = {A Blockchain-Based Trusted Edge Platform in Edge Computing Environment.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {6}, pages = {}, pmid = {33803561}, issn = {1424-8220}, abstract = {Edge computing is a product of the evolution of IoT and the development of cloud computing technology, providing computing, storage, network, and other infrastructure close to users. Compared with the centralized deployment model of traditional cloud computing, edge computing solves the problems of extended communication time and high convergence traffic, providing better support for low latency and high bandwidth services. With the increasing amount of data generated by users and devices in IoT, security and privacy issues in the edge computing environment have become concerns. Blockchain, a security technology that has developed rapidly in recent years, has been adopted by many industries, such as finance and insurance.
With edge computing capability, deploying blockchain platforms/applications on edge computing platforms can provide security services for network edge environments. Although there are already solutions for integrating edge computing with blockchain in many IoT application scenarios, they somewhat lack scalability, portability, and heterogeneous data processing. In this paper, we propose a trusted edge platform to integrate the edge computing framework and blockchain network for building an edge security environment. The proposed platform aims to preserve the data privacy of the edge computing client. The design based on the microservice architecture makes the platform lighter. To improve the portability of the platform, we introduce the EdgeX Foundry framework and design an edge application module on the platform to improve the business capability of EdgeX. We also designed a series of well-defined security authentication microservices. These microservices use the Hyperledger Fabric blockchain network to build a reliable security mechanism in the edge environment. Finally, we build an edge computing network using different hardware devices and deploy the trusted edge platform on multiple network nodes. The usability of the proposed platform is demonstrated by testing the round-trip time (RTT) of several important workflows. The experimental results demonstrate that the platform can meet the availability requirements in real-world usage scenarios.}, } @article {pmid33803360, year = {2021}, author = {Klein, I and Oppelt, N and Kuenzer, C}, title = {Application of Remote Sensing Data for Locust Research and Management-A Review.}, journal = {Insects}, volume = {12}, number = {3}, pages = {}, pmid = {33803360}, issn = {2075-4450}, abstract = {Recently, locust outbreaks around the world have destroyed agricultural and natural vegetation and caused massive damage endangering food security. Unusually heavy rainfalls in habitats of the desert locust (Schistocerca gregaria) and lack of monitoring due to political conflicts or inaccessibility of those habitats led to massive desert locust outbreaks and swarms migrating over the Arabian Peninsula, East Africa, India and Pakistan. At the same time, swarms of the Moroccan locust (Dociostaurus maroccanus) in some Central Asian countries and swarms of the Italian locust (Calliptamus italicus) in Russia and China destroyed crops despite developed and ongoing monitoring and control measures. These recent events underline that the risk and damage caused by locust pests are as present as ever and affect 100 million human lives, despite technical progress in locust monitoring, prediction and control approaches. Remote sensing has become one of the most important data sources in locust management. Since the 1980s, remote sensing data and applications have accompanied many locust management activities and contributed to improved and more effective control of locust outbreaks and plagues. Recently, open-access remote sensing data archives as well as progress in cloud computing provide unprecedented opportunities for remote sensing-based locust management and research. Additionally, unmanned aerial vehicle (UAV) systems open up new prospects for more effective and faster locust control. Nevertheless, the full capacity of available remote sensing applications and possibilities has not yet been exploited.
This review paper provides a comprehensive and quantitative overview of international research articles focusing on remote sensing applications for locust management and research. We reviewed 110 articles published over the last four decades and categorized them into different aspects and main research topics to summarize achievements and gaps for further research and application development. The results reveal a strong focus on three species, the desert locust, the migratory locust (Locusta migratoria), and the Australian plague locust (Chortoicetes terminifera), and on corresponding regions of interest. There is still a lack of international studies for other pest species such as the Italian locust, the Moroccan locust, the Central American locust (Schistocerca piceifrons), the South American locust (Schistocerca cancellata), the brown locust (Locustana pardalina) and the red locust (Nomadacris septemfasciata). In terms of applied sensors, most studies utilized Advanced Very-High-Resolution Radiometer (AVHRR), Satellite Pour l'Observation de la Terre VEGETATION (SPOT-VGT), Moderate-Resolution Imaging Spectroradiometer (MODIS) as well as Landsat data, focusing mainly on vegetation monitoring or land cover mapping. Application of geomorphological metrics as well as radar-based soil moisture data is comparatively rare despite previous acknowledgement of their importance for locust outbreaks. Despite great advances in and usage of available remote sensing resources, we identify several gaps and potential directions for future research to further improve understanding and capacity in the use of remote sensing to support locust outbreak research and management.}, } @article {pmid33803329, year = {2021}, author = {Poniszewska-Marańda, A and Czechowska, E}, title = {Kubernetes Cluster for Automating Software Production Environment.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {5}, pages = {}, pmid = {33803329}, issn = {1424-8220}, abstract = {Microservices, Continuous Integration and Delivery, Docker, DevOps, Infrastructure as Code: these are the current trends and buzzwords in the technological world of 2020. A popular tool which can facilitate the deployment and maintenance of microservices is Kubernetes. Kubernetes is a platform for running containerized applications, for example microservices. There were two main questions whose answers were important to us: how to deploy Kubernetes itself, and how to ensure that the deployment fulfils the needs of a production environment. Our research concentrates on the analysis and evaluation of a Kubernetes cluster as a software production environment. However, it is first necessary to determine and evaluate the requirements of a production environment. The paper presents the determination and analysis of such requirements and their evaluation in the case of a Kubernetes cluster. Next, the paper compares two methods of deploying a Kubernetes cluster: kops and eksctl. Both of the methods concern the AWS cloud, which was chosen mainly because of its wide popularity and the range of provided services.
Besides the two chosen methods of deployment, there are many more, including the DIY method and deploying on-premises.}, } @article {pmid33802673, year = {2021}, author = {Hadzovic, S and Mrdovic, S and Radonjic, M}, title = {Identification of IoT Actors.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {6}, pages = {}, pmid = {33802673}, issn = {1424-8220}, abstract = {The Internet of Things (IoT) is a leading trend with numerous opportunities accompanied by advantages as well as disadvantages. In parallel with IoT development, significant privacy and personal data protection challenges are also growing. In this regard, the General Data Protection Regulation (GDPR) is often considered the world's strongest set of data protection rules and has proven to be a catalyst for many countries around the world. The concepts and interaction of the data controller, the joint controllers, and the data processor play a key role in the implementation of the GDPR. Therefore, clarifying the blurred IoT actors' relationships to determine corresponding responsibilities is necessary. Given the IoT transformation reflected in shifting computing power from the cloud to the edge, in this research we have considered how these computing paradigms are affecting IoT actors. In this regard, we have introduced identification of IoT actors according to a new five-layer computing IoT model based on cloud, fog, edge, mist, and dew computing. Our conclusion is that identifying IoT actors in the light of the corresponding IoT data manager roles could be useful in determining the responsibilities of IoT actors for their compliance with data protection and privacy rules.}, } @article {pmid33802669, year = {2021}, author = {Sedar, R and Vázquez-Gallego, F and Casellas, R and Vilalta, R and Muñoz, R and Silva, R and Dizambourg, L and Fernández Barciela, AE and Vilajosana, X and Datta, SK and Härri, J and Alonso-Zarate, J}, title = {Standards-Compliant Multi-Protocol On-Board Unit for the Evaluation of Connected and Automated Mobility Services in Multi-Vendor Environments.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {6}, pages = {}, pmid = {33802669}, issn = {1424-8220}, support = {825050//Horizon 2020 Framework Programme/ ; }, abstract = {Vehicle-to-everything (V2X) communications enable real-time information exchange between vehicles and infrastructure, which extends the perception range of vehicles beyond the limits of on-board sensors and thus facilitates the realisation of cooperative, connected, and automated mobility (CCAM) services that will improve road safety and traffic efficiency. In the context of CCAM, the successful deployments of cooperative intelligent transport system (C-ITS) use cases, with the integration of advanced wireless communication technologies, are effectively making transport safer and more efficient. However, the evaluation of multi-vendor and multi-protocol based CCAM service architectures can become challenging and complex. Additionally, conducting on-demand field trials of such architectures with real vehicles involved is prohibitively expensive and time-consuming. In order to overcome these obstacles, in this paper, we present the development of a standards-compliant experimental vehicular on-board unit (OBU) that supports the integration of multiple V2X protocols from different vendors to communicate with heterogeneous cloud-based services that are offered by several original equipment manufacturers (OEMs).
We experimentally demonstrate the functionalities of the OBU in a real-world deployment of a cooperative collision avoidance service infrastructure that is based on edge and cloud servers. In addition, we measure end-to-end application-level latencies of multi-protocol supported V2X information flows to show the effectiveness of interoperability in V2X communications between different vehicle OEMs.}, } @article {pmid33800530, year = {2021}, author = {Wang, Y and Wang, L and Zheng, R and Zhao, X and Liu, M}, title = {Latency-Optimal Computational Offloading Strategy for Sensitive Tasks in Smart Homes.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {7}, pages = {}, pmid = {33800530}, issn = {1424-8220}, support = {61976243//National Natural Science Foundation of China/ ; 61971458//National Natural Science Foundation of China/ ; }, abstract = {In smart homes, the computational offloading technology of edge cloud computing (ECC) can effectively deal with the large amount of computation generated by smart devices. In this paper, we propose a back-pressure-based computational offloading strategy for minimizing delay (BMDCO) that determines the offloading decision and the number of tasks that can be offloaded. Specifically, we first construct a system with multiple local smart device task queues and multiple edge processor task queues. Then, we formulate an offloading strategy that minimizes the task queue length in each time slot by solving a Lyapunov drift minimization problem, so as to stabilize the queues and improve offloading performance. In addition, we give a theoretical analysis of the stability of the BMDCO algorithm by deriving an upper bound on all queues in the system. The simulation results show the stability of the proposed algorithm and demonstrate that it is superior to alternatives. Compared with other algorithms, this algorithm can effectively reduce the computation delay.}, } @article {pmid33800262, year = {2021}, author = {Agapiou, A}, title = {Multi-Temporal Change Detection Analysis of Vertical Sprawl over Limassol City Centre and Amathus Archaeological Site in Cyprus during 2015-2020 Using the Sentinel-1 Sensor and the Google Earth Engine Platform.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {5}, pages = {}, pmid = {33800262}, issn = {1424-8220}, support = {EXCELLENCE/0918/0052//Republic of Cyprus and the Structural Funds of the European Union in Cyprus/ ; }, abstract = {Urban sprawl can negatively impact the archaeological record of an area. In order to study the urbanisation process and its patterns, satellite images were used in the past to identify land-use changes and detect individual buildings and constructions. However, this approach involves the acquisition of high-resolution satellite images, the cost of which increases with the size of the area under study and the time interval of the analysis. In this paper, we implement a quick, automatic, and low-cost exploration of large areas for this purpose, aiming to provide a medium-resolution overview of landscape changes. This study focuses on using radar Sentinel-1 images to monitor and detect multi-temporal changes during the period 2015-2020 in Limassol, Cyprus. In addition, the big data cloud platform, Google Earth Engine, was used to process the data.
Three different change detection methods were implemented in this platform as follows: (a) vertical transmit, vertical receive (VV) and vertical transmit, horizontal receive (VH) polarisations pseudo-colour composites; (b) the Rapid and Easy Change Detection in Radar Time-Series by Variation Coefficient (REACTIV) Google Earth Engine algorithm; and (c) a multi-temporal Wishart-based change detection algorithm. The overall findings are presented for the wider area of Limassol city, with special focus on the archaeological site of "Amathus" and the city centre of Limassol. For validation purposes, satellite images from the multi-temporal archive of the Google Earth platform were used. The methods mentioned above were able to capture the urbanization process of the city that took place during this period due to recent large construction projects.}, } @article {pmid33800232, year = {2021}, author = {Hsiao, CH and Lin, FY and Fang, ES and Chen, YF and Wen, YF and Huang, Y and Su, YC and Wu, YS and Kuo, HY}, title = {Optimization-Based Resource Management Algorithms with Considerations of Client Satisfaction and High Availability in Elastic 5G Network Slices.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {5}, pages = {}, pmid = {33800232}, issn = {1424-8220}, abstract = {A combined edge and core cloud computing environment is a novel solution in 5G network slices. The clients' high availability requirement is a challenge because it limits the possible admission control in front of the edge cloud. This work proposes an orchestrator with a mathematical programming model from a global viewpoint to solve resource management problems and satisfy clients' high-availability requirements. The proposed Lagrangian relaxation-based approach is adopted to solve the problems at a near-optimal level for increasing the system revenue. A promising and straightforward resource management approach and several experimental cases are used to evaluate the efficiency and effectiveness. Preliminary results are presented as performance evaluations to verify the proposed approach's suitability for edge and core cloud computing environments. The proposed orchestrator significantly enables network slicing services and efficiently enhances clients' satisfaction with high availability.}, } @article {pmid33797700, year = {2021}, author = {Bentes, PCL and Nadal, J}, title = {A telediagnosis assistance system for multiple-lead electrocardiography.}, journal = {Physical and engineering sciences in medicine}, volume = {44}, number = {2}, pages = {473-485}, pmid = {33797700}, issn = {2662-4737}, mesh = {Cloud Computing ; Electrocardiography ; Humans ; *Myocardial Ischemia ; Signal Processing, Computer-Assisted ; *Telemedicine ; }, abstract = {The diffusion of telemedicine opens up a new perspective for the development of technologies furthered by Biomedical Engineering. In particular, herein we deal with those related to telediagnosis through multiple-lead electrocardiographic signals. This study focuses on the proof-of-concept of an internet-based telemedicine system as a use case that attests to the feasibility of developing, within the university environment, techniques for remote processing of biomedical signals for adjustable detection of myocardial ischemia episodes. At each signal lead, QRS complexes are detected and delimited with the J-point marking.
The same procedure used to detect the complex is used to identify the respective T wave; the area over the ST segment is then applied to detect ischemia-related elevations. The entire system is designed on web-based telemedicine services using multiuser, remote-access technologies and a database. The measurements of sensitivity and precision had their respective averages calculated at 11.79% and 24.21% for the lower-noise leads. The evaluations of the user friendliness and usefulness of the application resulted in 88.57% and 89.28% broad or total acceptance, respectively. The services are robust enough to enable scalability and can be offered via cloud computing, besides enabling the development of new biomedical signal processing techniques within the concept of distance services, using a modular architecture with a collaborative bias.}, } @article {pmid33790958, year = {2021}, author = {Deepika, J and Rajan, C and Senthil, T}, title = {Security and Privacy of Cloud- and IoT-Based Medical Image Diagnosis Using Fuzzy Convolutional Neural Network.}, journal = {Computational intelligence and neuroscience}, volume = {2021}, number = {}, pages = {6615411}, pmid = {33790958}, issn = {1687-5273}, mesh = {Cloud Computing ; *Computer Security ; Confidentiality ; Humans ; Neural Networks, Computer ; *Privacy ; }, abstract = {In recent times, security in cloud computing has become a significant concern in healthcare services, specifically in medical data storage and disease prediction. A large volume of data is produced in the healthcare environment every day due to developments in medical devices. Thus, cloud computing technology is utilised for storing, processing, and handling these large volumes of data in a manner highly secured against various attacks. This paper focuses on disease classification by utilising image processing in a secured cloud computing environment, using an extended zigzag image encryption scheme with greater tolerance to different data attacks. Secondly, a fuzzy convolutional neural network (FCNN) algorithm is proposed for effective classification of images. The decrypted images are used for classification of cancer levels with different layers of training. After classification, the results are transferred to the concerned doctors and patients for further treatment. Here, the experimental process is carried out by utilising a standard dataset. The results from the experiment concluded that the proposed algorithm shows better performance than other existing algorithms and can be effectively utilised for medical image diagnosis.}, } @article {pmid33783626, year = {2021}, author = {Floreano, IX and de Moraes, LAF}, title = {Land use/land cover (LULC) analysis (2009-2019) with Google Earth Engine and 2030 prediction using Markov-CA in the Rondônia State, Brazil.}, journal = {Environmental monitoring and assessment}, volume = {193}, number = {4}, pages = {239}, pmid = {33783626}, issn = {1573-2959}, mesh = {Agriculture ; Brazil ; *Conservation of Natural Resources ; Ecosystem ; *Environmental Monitoring ; }, abstract = {The Amazonian biome is important not only for South America but also for the entire planet, providing essential environmental services. The state of Rondônia ranks third in deforestation rates in the Brazilian Legal Amazon (BLA) political division.
This study aims to evaluate the land use/land cover (LULC) changes over the past ten years (2009-2019), as well as to predict the LULC for the next 10 years, using TerrSet 18.3 software, in the state of Rondônia, Brazil. Image classification employed a Random Forest classifier, a machine learning algorithm, within the Google Earth Engine cloud-based platform. The Markov-CA algorithm predicted future LULC changes by comparing scenarios of one and three transitions. The results showed a reduction in forested areas of about 15.7% between 2009 and 2019 in the state of Rondônia. According to the predictive model, by 2030, around 30% of the remaining forests will be logged, most likely converted into occupied areas. The results reinforce the importance of measures and policies integrated with investments in research and satellite monitoring to reduce deforestation in the Brazilian Amazon and ensure the continuity of the Amazonian role in halting climate change.}, } @article {pmid33775559, year = {2021}, author = {Wimberly, MC and de Beurs, KM and Loboda, TV and Pan, WK}, title = {Satellite Observations and Malaria: New Opportunities for Research and Applications.}, journal = {Trends in parasitology}, volume = {37}, number = {6}, pages = {525-537}, pmid = {33775559}, issn = {1471-5007}, support = {P2C HD065563/HD/NICHD NIH HHS/United States ; R01 AI079411/AI/NIAID NIH HHS/United States ; }, mesh = {*Environmental Monitoring/instrumentation/methods ; Humans ; Malaria/*prevention & control ; Remote Sensing Technology/*instrumentation ; Research/*trends ; *Satellite Imagery ; }, abstract = {Satellite remote sensing provides a wealth of information about environmental factors that influence malaria transmission cycles and human populations at risk. Long-term observations facilitate analysis of climate-malaria relationships, and high-resolution data can be used to assess the effects of agriculture, urbanization, deforestation, and water management on malaria. New sources of very-high-resolution satellite imagery and synthetic aperture radar data will increase the precision and frequency of observations. Cloud computing platforms for remote sensing data combined with analysis-ready datasets and high-level data products have made satellite remote sensing more accessible to nonspecialists. Further collaboration between the malaria and remote sensing communities is needed to develop and implement useful geospatial data products that will support global efforts toward malaria control, elimination, and eradication.}, } @article {pmid33770943, year = {2021}, author = {Li, C and Bao, K and Qin, S and Guan, K and Xu, G and Su, J}, title = {Grating-enabled high-speed high-efficiency surface-illuminated silicon photodiodes.}, journal = {Optics express}, volume = {29}, number = {3}, pages = {3458-3464}, doi = {10.1364/OE.412412}, pmid = {33770943}, issn = {1094-4087}, abstract = {High-speed, high-efficiency silicon photodetectors play important roles in the optical communication links that are used increasingly in data centers to handle the growing volumes of data traffic and higher bandwidths required as use of big data and cloud computing continues to grow exponentially. Monolithic integration of the optical components with signal processing electronics on a single silicon chip is of paramount importance in the drive to reduce costs and improve performance. Here we report grating-enhanced light absorption in a silicon photodiode.
The absorption efficiency is determined theoretically to be as high as 77% at 850 nm for the optimal structure, which has a thin intrinsic absorption layer with a thickness of 220 nm. The fabricated devices demonstrate a high bandwidth of 11.3 GHz and improved radio-frequency output power of more than 14 dB, thus making them suitable for use in data center optical communications.}, } @article {pmid33763309, year = {2021}, author = {Schoenbachler, JL and Hughey, JJ}, title = {pmparser and PMDB: resources for large-scale, open studies of the biomedical literature.}, journal = {PeerJ}, volume = {9}, number = {}, pages = {e11071}, pmid = {33763309}, issn = {2167-8359}, support = {R35 GM124685/GM/NIGMS NIH HHS/United States ; }, abstract = {PubMed is an invaluable resource for the biomedical community. Although PubMed is freely available, the existing API is not designed for large-scale analyses and the XML structure of the underlying data is inconvenient for complex queries. We developed an R package called pmparser to convert the data in PubMed to a relational database. Our implementation of the database, called PMDB, currently contains data on over 31 million PubMed Identifiers (PMIDs) and is updated regularly. Together, pmparser and PMDB can enable large-scale, reproducible, and transparent analyses of the biomedical literature. pmparser is licensed under GPL-2 and available at https://pmparser.hugheylab.org. PMDB is available in both PostgreSQL (DOI 10.5281/zenodo.4008109) and Google BigQuery (https://console.cloud.google.com/bigquery?project=pmdb-bq&d=pmdb).}, } @article {pmid33763195, year = {2021}, author = {Yao, L and Shang, D and Zhao, H and Hu, S}, title = {Medical Equipment Comprehensive Management System Based on Cloud Computing and Internet of Things.}, journal = {Journal of healthcare engineering}, volume = {2021}, number = {}, pages = {6685456}, pmid = {33763195}, issn = {2040-2309}, mesh = {Algorithms ; *Cloud Computing ; Hospitals ; Internet ; *Internet of Things ; }, abstract = {Continuous progress in modern medicine involves not only the level of medical technology but also various high-tech auxiliary medical equipment. With the rapid development of hospital information construction, medical equipment plays a very important role in the diagnosis, treatment, and prognosis observation of disease. However, the continuous growth of the types and quantity of medical equipment has caused considerable difficulties in the management of hospital equipment. To improve the efficiency of medical equipment management in hospitals, this paper develops a comprehensive medical equipment management system based on cloud computing and the Internet of Things, and uses an improved particle swarm optimization algorithm and a chicken swarm algorithm to help the system achieve dynamic task scheduling. The purpose of this paper is to develop a comprehensive intelligent management system to master the procurement, maintenance, and use of all medical equipment in the hospital, so as to maximize the scientific management of medical equipment. It is also necessary to develop a preventive maintenance plan for medical equipment. From the experimental data, it can be seen that with 100 simulated users online simultaneously, the response time for submitting the equipment maintenance application form is 1228 ms, and the accuracy rate is 99.8%.
With 1000 simulated online users, the response time for submitting the equipment maintenance application form is 5123 ms, with an accuracy rate of 99.4%. On the whole, the medical equipment management information system performs excellently under stress testing. It not only meets the initial performance requirements but also provides extensive data support for equipment management and maintenance.}, } @article {pmid33751044, year = {2022}, author = {Caufield, JH and Sigdel, D and Fu, J and Choi, H and Guevara-Gonzalez, V and Wang, D and Ping, P}, title = {Cardiovascular informatics: building a bridge to data harmony.}, journal = {Cardiovascular research}, volume = {118}, number = {3}, pages = {732-745}, pmid = {33751044}, issn = {1755-3245}, support = {R01 HL146739/HL/NHLBI NIH HHS/United States ; R35 HL135772/HL/NHLBI NIH HHS/United States ; T32 HL139450/HL/NHLBI NIH HHS/United States ; }, mesh = {*Artificial Intelligence ; *Cardiovascular Diseases/diagnosis/therapy ; Cloud Computing ; Humans ; Informatics ; Machine Learning ; }, abstract = {The search for new strategies for better understanding cardiovascular (CV) disease is a constant one, spanning multitudinous types of observations and studies. A comprehensive characterization of each disease state and its biomolecular underpinnings relies upon insights gleaned from extensive information collection of various types of data. Researchers and clinicians in CV biomedicine repeatedly face questions regarding which types of data may best answer their questions, how to integrate information from multiple datasets of various types, and how to adapt emerging advances in machine learning and/or artificial intelligence to their needs in data processing. Frequently lauded as a field with great practical and translational potential, the interface between biomedical informatics and CV medicine is challenged with staggeringly massive datasets. Successful application of computational approaches to decode these complex and gigantic amounts of information becomes an essential step toward realizing the desired benefits. In this review, we examine recent efforts to adapt informatics strategies to CV biomedical research: automated information extraction and unification of multifaceted -omics data. We discuss how and why this interdisciplinary space of CV Informatics is particularly relevant to and supportive of current experimental and clinical research. We describe in detail how open data sources and methods can drive discovery while demanding few initial resources, an advantage afforded by widespread availability of cloud computing-driven platforms. Subsequently, we provide examples of how interoperable computational systems facilitate exploration of data from multiple sources, including both consistently formatted structured data and unstructured data.
Taken together, these approaches for achieving data harmony enable molecular phenotyping of CV diseases and unification of CV knowledge.}, } @article {pmid33748749, year = {2021}, author = {Ogle, C and Reddick, D and McKnight, C and Biggs, T and Pauly, R and Ficklin, SP and Feltus, FA and Shannigrahi, S}, title = {Named Data Networking for Genomics Data Management and Integrated Workflows.}, journal = {Frontiers in big data}, volume = {4}, number = {}, pages = {582468}, pmid = {33748749}, issn = {2624-909X}, abstract = {Advanced imaging and DNA sequencing technologies now enable the diverse biology community to routinely generate and analyze terabytes of high-resolution biological data. The community is rapidly heading toward the petascale in single-investigator laboratory settings. As evidence, the NCBI SRA central DNA sequence repository alone contains over 45 petabytes of biological data. Given the geometric growth of this and other genomics repositories, an exabyte of mineable biological data is imminent. The challenges of effectively utilizing these datasets are enormous, as they are not only large in size but also stored in geographically distributed repositories such as the National Center for Biotechnology Information (NCBI), the DNA Data Bank of Japan (DDBJ), the European Bioinformatics Institute (EBI), and NASA's GeneLab. In this work, we first systematically point out the data-management challenges of the genomics community. We then introduce Named Data Networking (NDN), a novel but well-researched Internet architecture that is capable of solving these challenges at the network layer. NDN performs all operations such as forwarding requests to data sources, content discovery, access, and retrieval using content names (similar to traditional filenames or filepaths) and eliminates the need for a location layer (the IP address) for data management. Utilizing NDN for genomics workflows simplifies data discovery, speeds up data retrieval using in-network caching of popular datasets, and allows the community to create infrastructure that supports operations such as creating federations of content repositories, retrieval from multiple sources, remote data subsetting, and others. Name-based operations also streamline the deployment and integration of workflows with various cloud platforms. Our contributions in this work are as follows: 1) we enumerate the cyberinfrastructure challenges of the genomics community that NDN can alleviate, and 2) we describe our efforts in applying NDN to a contemporary genomics workflow (GEMmaker) and quantify the improvements. The preliminary evaluation shows a sixfold speed-up in data insertion into the workflow. 3) As a pilot, we have used an NDN naming scheme (agreed upon by the community and discussed in Section 4) to publish data from broadly used data repositories including the NCBI SRA. We have loaded the NDN testbed with these pre-processed genomes, which can be accessed over NDN and used by anyone interested in those datasets. Finally, we discuss our continued effort in integrating NDN with cloud computing platforms, such as the Pacific Research Platform (PRP). The reader should note that the goal of this paper is to introduce NDN to the genomics community and discuss NDN's properties that can benefit the genomics community.
We do not present an extensive performance evaluation of NDN; we are working on extending and evaluating our pilot deployment and will present systematic results in future work.}, } @article {pmid33740542, year = {2021}, author = {Guo, J and Chen, S and Tian, S and Liu, K and Ni, J and Zhao, M and Kang, Y and Ma, X and Guo, J}, title = {5G-enabled ultra-sensitive fluorescence sensor for proactive prognosis of COVID-19.}, journal = {Biosensors & bioelectronics}, volume = {181}, number = {}, pages = {113160}, pmid = {33740542}, issn = {1873-4235}, mesh = {*Biosensing Techniques ; COVID-19/*diagnosis ; *Computer Systems ; Fluorescence ; Humans ; *Immunoassay ; Prognosis ; SARS-CoV-2 ; }, abstract = {The severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2) has been spreading around the globe since December 2019. There is an urgent need to develop sensitive, online methods for on-site diagnosis and monitoring of suspected COVID-19 patients. With the rapid development of the Internet of Things (IoT), the Internet of Medical Things (IoMT) provides an impressive solution to this problem. In this paper, we propose a 5G-enabled fluorescence sensor for quantitative detection of the spike protein and nucleocapsid protein of SARS-CoV-2 by using a mesoporous silica encapsulated up-conversion nanoparticle (UCNPs@mSiO2) labeled lateral flow immunoassay (LFIA). The sensor can detect spike protein (SP) with a limit of detection (LOD) of 1.6 ng/mL and nucleocapsid protein (NP) with an LOD of 2.2 ng/mL. The feasibility of the sensor for clinical use was further demonstrated by utilizing virus culture as real clinical samples. Moreover, the proposed fluorescence sensor is IoMT-enabled, being accessible to edge hardware devices (personal computers, 5G smartphones, IPTV, etc.) through Bluetooth. Medical data can be transmitted to the fog layer of the network and to a 5G cloud server with ultra-low latency and high reliability for edge computing and big data analysis. Furthermore, a COVID-19 monitoring module working with the proposed system is developed as a smartphone application (App), which enables patients and their families to record their medical data and daily conditions remotely, relieving the burden of visits to central hospitals. We believe that the proposed system will be highly practical in the future treatment and prevention of COVID-19 and other mass infectious diseases.}, } @article {pmid33739401, year = {2021}, author = {Blamey, B and Toor, S and Dahlö, M and Wieslander, H and Harrison, PJ and Sintorn, IM and Sabirsh, A and Wählby, C and Spjuth, O and Hellander, A}, title = {Rapid development of cloud-native intelligent data pipelines for scientific data streams using the HASTE Toolkit.}, journal = {GigaScience}, volume = {10}, number = {3}, pages = {}, pmid = {33739401}, issn = {2047-217X}, mesh = {*Biological Science Disciplines ; Diagnostic Imaging ; *Software ; }, abstract = {BACKGROUND: Large streamed datasets, characteristic of life science applications, are often resource-intensive to process, transport and store. We propose a pipeline model, a design pattern for scientific pipelines, where an incoming stream of scientific data is organized into a tiered or ordered "data hierarchy". We introduce the HASTE Toolkit, a proof-of-concept cloud-native software toolkit based on this pipeline model, to partition and prioritize data streams to optimize use of limited computing resources.

FINDINGS: In our pipeline model, an "interestingness function" assigns an interestingness score to data objects in the stream, inducing a data hierarchy. From this score, a "policy" guides decisions on how to prioritize computational resource use for a given object. The HASTE Toolkit is a collection of tools that adopt this approach. We evaluate the toolkit with two microscopy imaging case studies. The first is a high-content screening experiment, where images are analyzed in an on-premise container cloud to prioritize storage and subsequent computation. The second considers edge processing of images for upload into the public cloud for real-time control of a transmission electron microscope.
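For readers who want to see the pattern in code, below is a minimal Python sketch of an interestingness function and policy of the kind described above; the scoring rule, thresholds, and tier names are hypothetical illustrations, not part of the HASTE Toolkit API.

    import numpy as np

    def interestingness(image):
        # Hypothetical score: fraction of pixels above a fixed background level.
        return float((image > 0.2).mean())

    def policy(score):
        # Map an interestingness score to a storage/compute tier.
        if score >= 0.5:
            return "tier-1-analyze-now"      # prioritized computation
        if score >= 0.1:
            return "tier-2-store-full"       # keep at full resolution
        return "tier-3-downsample-or-drop"   # lowest priority

    rng = np.random.default_rng(0)
    stream = (rng.random((64, 64)) for _ in range(5))  # stand-in image stream
    for i, frame in enumerate(stream):
        print(i, policy(interestingness(frame)))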

CONCLUSIONS: Through our evaluation, we created smart data pipelines capable of effective use of storage, compute, and network resources, enabling more efficient data-intensive experiments. We note a beneficial separation between the scientific concerns of data priority and the implementation of this behaviour for different resources in different deployment contexts. The toolkit allows intelligent prioritization to be 'bolted on' to new and existing systems, and is intended for use with a range of technologies in different deployment scenarios.}, } @article {pmid33737518, year = {2021}, author = {Kumar, D}, title = {Urban objects detection from C-band synthetic aperture radar (SAR) satellite images through simulating filter properties.}, journal = {Scientific reports}, volume = {11}, number = {1}, pages = {6241}, pmid = {33737518}, issn = {2045-2322}, abstract = {Satellite-based remote sensing plays a key role in monitoring Earth features, but flaws of traditional remote sensing methods, such as limited cloud penetration and restricted acquisition windows, have shifted attention toward alternative methods such as microwave or radar sensing technology. Microwave remote sensing utilizes synthetic aperture radar (SAR) technology and can operate in all weather conditions. Previous researchers have reported on the effects of SAR pre-processing for urban object detection and mapping. Preparing high-accuracy urban maps is critical to disaster planning and response efforts, so the results from this study can guide users on the required pre-processing steps and their effects. Owing to induced errors (such as calibration, geometric, and speckle noise), radar images suffer from several distortions that need to be processed before any application, as they cause issues in image interpretation and can destroy valuable information about the shapes, sizes, patterns, and tones of desired objects. The present work aims to utilize Sentinel-1 SAR datasets for urban studies (i.e. urban object detection through simulation of filter properties). The work uses C-band SAR datasets acquired from the Sentinel-1A/B sensor, and Google Earth datasets to validate the recognized objects. It was observed that the Refined-Lee filter performed well in providing detailed information about the various urban objects. It was also established that the attempted approach cannot be generalised as one suitable method for sensing or identifying accurate urban objects from C-band SAR images; hence, additional datasets in different polarisation combinations need to be attempted.}, } @article {pmid33733530, year = {2021}, author = {Chandak, T and Wong, CF}, title = {EDock-ML: A web server for using ensemble docking with machine learning to aid drug discovery.}, journal = {Protein science : a publication of the Protein Society}, volume = {30}, number = {5}, pages = {1087-1097}, pmid = {33733530}, issn = {1469-896X}, support = {R15 CA224033/CA/NCI NIH HHS/United States ; }, mesh = {*Databases, Chemical ; *Drug Discovery ; *Internet ; *Machine Learning ; *Molecular Docking Simulation ; *Software ; }, abstract = {EDock-ML is a web server that facilitates the use of ensemble docking with machine learning to help decide whether a compound is worth considering further in a drug discovery process. Ensemble docking provides an economical way to account for receptor flexibility in molecular docking.
Machine learning improves the use of the resulting docking scores to evaluate whether a compound is likely to be useful. EDock-ML takes a bottom-up approach in which machine-learning models are developed one protein at a time to improve predictions for the proteins included in its database. Because the machine-learning models are intended to be used without changing the docking and model parameters with which they were trained, novice users can use the server directly without worrying about what parameters to choose. A user simply submits a compound specified by an ID from the ZINC database (Sterling, T.; Irwin, J. J., J Chem Inf Model 2015, 55[11], 2,324-2,337.) or uploads a file prepared by a chemical drawing program, and receives an output helping the user decide the likelihood of the compound being active or inactive for a drug target. EDock-ML can be accessed freely at edock-ml.umsl.edu.}, } @article {pmid33732040, year = {2021}, author = {Ali, MA}, title = {Phylotranscriptomic analysis of Dillenia indica L. (Dilleniales, Dilleniaceae) and its systematics implication.}, journal = {Saudi journal of biological sciences}, volume = {28}, number = {3}, pages = {1557-1560}, pmid = {33732040}, issn = {1319-562X}, abstract = {Recent massive developments in next-generation sequencing platforms and bioinformatics tools, including cloud-based computing, have proven extremely useful in understanding the deeper-level phylogenetic relationships of angiosperms. The present phylotranscriptomic analyses address the poorly known evolutionary relationships of the order Dilleniales to the other orders of angiosperms using the minimum evolution method. The analyses revealed the nesting of the representative taxon of Dilleniales in the MPT, distinct from the representatives of the orders Santalales, Caryophyllales, Asterales, Cornales, Ericales, Lamiales, Saxifragales, Fabales, Malvales, Vitales and Berberidopsidales.}, } @article {pmid33727760, year = {2021}, author = {Bandara, E and Liang, X and Foytik, P and Shetty, S and Hall, C and Bowden, D and Ranasinghe, N and De Zoysa, K}, title = {A blockchain empowered and privacy preserving digital contact tracing platform.}, journal = {Information processing & management}, volume = {58}, number = {4}, pages = {102572}, pmid = {33727760}, issn = {0306-4573}, abstract = {The spread of the COVID-19 virus continues to increase fatality rates and exhaust the capacity of healthcare providers. Efforts to prevent transmission of the virus among humans remain a high priority. Current quarantine efforts involve social distancing and the monitoring and tracking of infected patients. However, the spread of the virus is too rapid to be contained only by manual and inefficient human contact tracing activities. To address this challenge, we have developed Connect, a blockchain empowered digital contact tracing platform that can leverage information on positive cases and notify people in their immediate proximity, thereby reducing the rate at which the infection could spread. This would be particularly effective if sufficient people use the platform and benefit from the targeted recommendations. The recommendations would be made in a privacy-preserving fashion and contain the spread of the virus without the need for an extended period of potential lockdown. Connect is an identity wallet platform which keeps user digital identities and user activity trace data on a blockchain platform using Self-Sovereign Identity (SSI) proofs.
User activity traces include places travelled, country of origin, and travel and dispatch updates from airports. With these activity trace records, the Connect platform can easily identify suspected patients who may be infected with the COVID-19 virus and take precautions before the infection spreads further. By storing digital identities and activity trace records on a blockchain-based SSI platform, Connect addresses the common issues of centralized cloud-based storage platforms (e.g. lack of data immutability, lack of traceability).}, } @article {pmid33724836, year = {2021}, author = {Olivella, R and Chiva, C and Serret, M and Mancera, D and Cozzuto, L and Hermoso, A and Borràs, E and Espadas, G and Morales, J and Pastor, O and Solé, A and Ponomarenko, J and Sabidó, E}, title = {QCloud2: An Improved Cloud-based Quality-Control System for Mass-Spectrometry-based Proteomics Laboratories.}, journal = {Journal of proteome research}, volume = {20}, number = {4}, pages = {2010-2013}, doi = {10.1021/acs.jproteome.0c00853}, pmid = {33724836}, issn = {1535-3907}, mesh = {*Cloud Computing ; Laboratories ; Mass Spectrometry ; *Proteomics ; Quality Control ; Reproducibility of Results ; Software ; }, abstract = {QCloud is a cloud-based system to support proteomics laboratories in daily quality assessment using a user-friendly interface, easy setup, and automated data processing. Since its release, QCloud has facilitated automated quality control for proteomics experiments in many laboratories. QCloud provides a quick and effortless evaluation of instrument performance that helps to overcome many analytical challenges derived from clinical and translational research. Here we present an improved version of the system, QCloud2. This new version includes enhancements in the scalability and reproducibility of the quality-control pipelines, and it features an improved front end for data visualization, user management, and chart annotation. The QCloud2 system also includes programmatic access and a standalone local version.}, } @article {pmid33719569, year = {2021}, author = {Tanwar, AS and Evangelatos, N and Venne, J and Ogilvie, LA and Satyamoorthy, K and Brand, A}, title = {Global Open Health Data Cooperatives Cloud in an Era of COVID-19 and Planetary Health.}, journal = {Omics : a journal of integrative biology}, volume = {25}, number = {3}, pages = {169-175}, doi = {10.1089/omi.2020.0134}, pmid = {33719569}, issn = {1557-8100}, mesh = {*Big Data ; COVID-19/*epidemiology/virology ; *Cloud Computing ; Delivery of Health Care ; *Global Health ; High-Throughput Nucleotide Sequencing ; Humans ; *Information Dissemination ; *International Cooperation ; *SARS-CoV-2/genetics ; }, abstract = {Big data in both the public domain and the health care industry are growing rapidly, for example, with broad availability of next-generation sequencing and large-scale phenomics datasets on patient-reported outcomes. In parallel, we are witnessing new research approaches that demand sharing of data for the benefit of planetary society. Health data cooperatives (HDCs) are one such approach, where health data are owned and governed collectively by the citizens who take part in them. Data stored in HDCs should remain readily available for translation to public health practice but, at the same time, be governed in a critically informed manner to ensure data integrity, veracity, and privacy, to name a few pressing concerns.
As a solution, we suggest that data generated from high-throughput omics research and phenomics can be stored in an open cloud platform so that researchers around the globe can share health data and work collaboratively. We describe here the Global Open Health Data Cooperatives Cloud (GOHDCC) as a proposed cloud-platform-based model for the sharing of health data between different HDCs around the globe. GOHDCC's main objective is to share health data on a global scale for robust and responsible global science, research, and development. GOHDCC is a citizen-oriented model cooperatively governed by citizens. The model essentially represents a global sharing platform that could benefit all stakeholders along the health care value chain.}, } @article {pmid33713354, year = {2021}, author = {Paredes-Pacheco, J and López-González, FJ and Silva-Rodríguez, J and Efthimiou, N and Niñerola-Baizán, A and Ruibal, Á and Roé-Vellvé, N and Aguiar, P}, title = {SimPET-An open online platform for the Monte Carlo simulation of realistic brain PET data. Validation for [18F]FDG scans.}, journal = {Medical physics}, volume = {48}, number = {5}, pages = {2482-2493}, pmid = {33713354}, issn = {2473-4209}, support = {FPU16/05108//Ministerio de Educación, Cultura y Deporte (MECD)/ ; FPU17/04470//Ministerio de Educación, Cultura y Deporte (MECD)/ ; EAPA_791/2018//European Commission (EC)/ ; RYC-2015/17430//MEC | Consejo Superior de Investigaciones Científicas (CSIC)/ ; }, mesh = {Algorithms ; Brain/diagnostic imaging ; *Fluorodeoxyglucose F18 ; Humans ; Image Processing, Computer-Assisted ; Monte Carlo Method ; *Positron Emission Tomography Computed Tomography ; Positron-Emission Tomography ; }, abstract = {PURPOSE: SimPET (www.sim-pet.org) is a free cloud-based platform for the generation of realistic brain positron emission tomography (PET) data. In this work, we introduce the key features of the platform. In addition, we validate the platform by comparing simulated healthy brain FDG-PET images with real healthy subject data for three commercial scanners (GE Advance NXi, GE Discovery ST, and Siemens Biograph mCT).

METHODS: The platform provides a graphical user interface to a set of automatic scripts taking care of the code execution for the phantom generation, simulation (SimSET), and tomographic image reconstruction (STIR). We characterize the performance using activity and attenuation maps derived from PET/CT and MRI data of 25 healthy subjects acquired with a GE Discovery ST. We then use the created maps to generate synthetic data for the GE Discovery ST, the GE Advance NXi, and the Siemens Biograph mCT. The validation was carried out by evaluating Bland-Altman differences between real and simulated images for each scanner. In addition, SPM voxel-wise comparison was performed to highlight regional differences. Examples for amyloid PET and for the generation of ground-truth pathological patients are included.

RESULTS: The platform can be efficiently used for generating realistic simulated FDG-PET images in a reasonable amount of time. The validation showed small differences between SimPET and acquired FDG-PET images, with errors below 10% for 98.09% (GE Discovery ST), 95.09% (GE Advance NXi), and 91.35% (Siemens Biograph mCT) of the voxels. Nevertheless, our SPM analysis showed significant regional differences between the simulated images and real healthy patients, and thus, the use of the platform for converting control subject databases between different scanners requires further investigation.
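To make the voxel-wise criterion in these RESULTS concrete, the short Python sketch below computes the share of voxels with relative error under 10% and a Bland-Altman-style summary; the arrays are random stand-ins, not SimPET output.

    import numpy as np

    rng = np.random.default_rng(42)
    real = rng.uniform(1.0, 10.0, size=(32, 32, 16))     # "acquired" volume
    sim = real * rng.normal(1.0, 0.05, size=real.shape)  # "simulated" volume

    rel_err = np.abs(sim - real) / real
    print(f"voxels with <10% error: {100 * (rel_err < 0.10).mean():.2f}%")

    diff = sim - real  # Bland-Altman style summary: bias and limits of agreement
    print(f"bias = {diff.mean():.3f}, limits of agreement = ±{1.96 * diff.std():.3f}")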

CONCLUSIONS: The presented platform can potentially allow scientists in clinical and research settings to perform MC simulation experiments without the need for high-end hardware or advanced computing knowledge, and in a reasonable amount of time.}, } @article {pmid33711538, year = {2021}, author = {Wang, X and Jiang, X and Vaidya, J}, title = {Efficient verification for outsourced genome-wide association studies.}, journal = {Journal of biomedical informatics}, volume = {117}, number = {}, pages = {103714}, pmid = {33711538}, issn = {1532-0480}, support = {R01 GM114612/GM/NIGMS NIH HHS/United States ; R01 GM118574/GM/NIGMS NIH HHS/United States ; R35 GM134927/GM/NIGMS NIH HHS/United States ; U01 TR002062/TR/NCATS NIH HHS/United States ; }, mesh = {Algorithms ; *Cloud Computing ; *Genome-Wide Association Study ; Humans ; Phenotype ; Polymorphism, Single Nucleotide ; }, abstract = {With cloud computing being widely adopted for conducting genome-wide association studies (GWAS), how to verify the integrity of outsourced GWAS computation remains an open problem. Here, we propose two novel algorithms to generate synthetic SNPs that are indistinguishable from real SNPs. The first method creates synthetic SNPs based on the phenotype vector, while the second approach creates synthetic SNPs based on real SNPs that are most similar to the phenotype vector. The time complexity of the first and the second approach is O(m) and O(m log n²), respectively, where m is the number of subjects and n is the number of SNPs. Furthermore, through a game-theoretic analysis, we demonstrate that it is possible to incentivize honest behavior by the server by coupling appropriate payoffs with randomized verification. We conduct extensive experiments on our proposed methods, and the results show that, beyond a formal adversarial model, when only a few synthetic SNPs are generated and mixed into the real data, they cannot be distinguished from the real SNPs even by a variety of predictive machine learning models. We demonstrate that the proposed approach can ensure that logistic regression for GWAS can be outsourced in an efficient and trustworthy way.}, } @article {pmid33693476, year = {2021}, author = {Bahmani, A and Xing, Z and Krishnan, V and Ray, U and Mueller, F and Alavi, A and Tsao, PS and Snyder, MP and Pan, C}, title = {Hummingbird: efficient performance prediction for executing genomic applications in the cloud.}, journal = {Bioinformatics (Oxford, England)}, volume = {37}, number = {17}, pages = {2537-2543}, pmid = {33693476}, issn = {1367-4811}, support = {P50 HG007735/HG/NHGRI NIH HHS/United States ; RM1 HG007735/HG/NHGRI NIH HHS/United States ; U24 HG009397/HG/NHGRI NIH HHS/United States ; }, abstract = {MOTIVATION: A major drawback of executing genomic applications on cloud computing facilities is the lack of tools to predict which instance type is the most appropriate, often resulting in an over- or under-matching of resources. Determining the right configuration before actually running the applications saves money and time. Here, we introduce Hummingbird, a tool for predicting the performance of computing instances with varying memory and CPU on multiple cloud platforms.

RESULTS: Our experiments on three major genomic data pipelines, including GATK HaplotypeCaller, GATK Mutect2 and ENCODE ATAC-seq, showed that Hummingbird was able to handle applications specified on the command line in JSON format or in workflow description language (WDL) format, and accurately predicted the fastest, the cheapest, and the most cost-efficient compute instances in an economical manner.
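The selection step that such predictions enable can be sketched in a few lines of Python; the instance names, runtimes, prices, and the "within 25% of fastest" rule below are invented for illustration and do not reflect Hummingbird's actual interface or criteria.

    # Predicted runtime (hours) and price ($/hour) per instance type (invented).
    predicted = {
        "small-2cpu-8gb":   (6.0, 0.10),
        "medium-4cpu-16gb": (3.2, 0.20),
        "large-8cpu-32gb":  (1.9, 0.40),
    }

    cost = {k: hours * price for k, (hours, price) in predicted.items()}
    fastest = min(predicted, key=lambda k: predicted[k][0])
    cheapest = min(cost, key=cost.get)
    # One possible notion of "cost-efficient": the cheapest instance whose
    # runtime stays within 25% of the fastest option.
    limit = 1.25 * predicted[fastest][0]
    efficient = min((k for k in predicted if predicted[k][0] <= limit), key=cost.get)
    print(fastest, cheapest, efficient)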

Hummingbird is available as an open source tool at: https://github.com/StanfordBioinformatics/Hummingbird.

SUPPLEMENTARY INFORMATION: Supplementary data are available at Bioinformatics online.}, } @article {pmid33693426, year = {2020}, author = {Wang, M and Yang, T and Flechas, MA and Harris, P and Hawks, B and Holzman, B and Knoepfel, K and Krupa, J and Pedro, K and Tran, N}, title = {GPU-Accelerated Machine Learning Inference as a Service for Computing in Neutrino Experiments.}, journal = {Frontiers in big data}, volume = {3}, number = {}, pages = {604083}, pmid = {33693426}, issn = {2624-909X}, abstract = {Machine learning algorithms are becoming increasingly prevalent and performant in the reconstruction of events in accelerator-based neutrino experiments. These sophisticated algorithms can be computationally expensive. At the same time, the data volumes of such experiments are rapidly increasing. The demand to process billions of neutrino events with many machine learning algorithm inferences creates a computing challenge. We explore a computing model in which heterogeneous computing with GPU coprocessors is made available as a web service. The coprocessors can be efficiently and elastically deployed to provide the right amount of computing for a given processing task. With our approach, Services for Optimized Network Inference on Coprocessors (SONIC), we integrate GPU acceleration specifically for the ProtoDUNE-SP reconstruction chain without disrupting the native computing workflow. With our integrated framework, we accelerate the most time-consuming task, track and particle shower hit identification, by a factor of 17. This results in a factor of 2.7 reduction in the total processing time when compared with CPU-only production. For this particular task, only 1 GPU is required for every 68 CPU threads, providing a cost-effective solution.}, } @article {pmid33693420, year = {2020}, author = {Qayyum, A and Ijaz, A and Usama, M and Iqbal, W and Qadir, J and Elkhatib, Y and Al-Fuqaha, A}, title = {Securing Machine Learning in the Cloud: A Systematic Review of Cloud Machine Learning Security.}, journal = {Frontiers in big data}, volume = {3}, number = {}, pages = {587139}, pmid = {33693420}, issn = {2624-909X}, abstract = {With the advances in machine learning (ML) and deep learning (DL) techniques, and the potency of cloud computing in offering services efficiently and cost-effectively, Machine Learning as a Service (MLaaS) cloud platforms have become popular. In addition, there is increasing adoption of third-party cloud services for outsourcing the training of DL models, which requires substantial and costly computational resources (e.g., high-performance graphics processing units (GPUs)). Such widespread usage of cloud-hosted ML/DL services opens a wide range of attack surfaces for adversaries to exploit the ML/DL system to achieve malicious goals. In this article, we conduct a systematic evaluation of the literature on cloud-hosted ML/DL models along both of the important dimensions related to their security: attacks and defenses. Our systematic review identified a total of 31 related articles, of which 19 focused on attacks, six on defenses, and six on both. Our evaluation reveals increasing interest from the research community in attacking and defending Machine Learning as a Service platforms.
In addition, we identify the limitations and pitfalls of the analyzed articles and highlight open research issues that require further investigation.}, } @article {pmid33691446, year = {2021}, author = {Cai, Y and Zeng, M and Chen, YZ}, title = {The pharmacological mechanism of Huashi Baidu Formula for the treatment of COVID-19 by combined network pharmacology and molecular docking.}, journal = {Annals of palliative medicine}, volume = {10}, number = {4}, pages = {3864-3895}, doi = {10.21037/apm-20-1759}, pmid = {33691446}, issn = {2224-5839}, mesh = {*COVID-19 ; *Drugs, Chinese Herbal/pharmacology/therapeutic use ; Humans ; Molecular Docking Simulation ; SARS-CoV-2 ; }, abstract = {BACKGROUND: Huashi Baidu Formula (HSBDF) is a traditional Chinese medicine formula consisting of fourteen parts, which has been proven clinically effective for treating coronavirus disease 2019 (COVID-19). However, the therapeutic mechanism of HSBDF in COVID-19 remains unclear.

METHODS: The components and action targets of HSBDF were searched in the TCMSP, YaTCM, PubChem, and TargetNet databases. Disease targets related to ACE2 were screened in single-cell sequencing data of colon epithelial cells from other reports. The therapeutic targets of HSBDF for COVID-19 were obtained by integrated analysis, and protein-protein interactions were analyzed using the STRING database. Gene Ontology (GO) and Kyoto Encyclopedia of Genes and Genomes (KEGG) enrichment analyses were performed using the OmicsBean and Metascape databases. The networks [component-target (C-T) network, component-target-pathway (C-T-P) network, herb-target (H-T) network, target-pathway (T-P) network, and meridian-tropism (M-T) network] were constructed with Cytoscape software. A cloud-computing molecular docking platform was used to verify the docking results.
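The target-intersection step in these METHODS can be illustrated with a small Python/networkx sketch; the compounds and gene symbols below are placeholders, not the study's data.

    import networkx as nx

    compound_targets = {                      # placeholder herb-compound targets
        "compound_A": {"ACE", "ESR1", "HDAC1"},
        "compound_B": {"ADRA1A", "ACE", "TP53"},
    }
    disease_targets = {"ACE", "ESR1", "ADRA1A", "HDAC1"}  # ACE2-related set

    g = nx.Graph()
    for compound, targets in compound_targets.items():
        for target in targets & disease_targets:   # keep common targets only
            g.add_edge(compound, target)

    # Edge list of the component-target (C-T) network, e.g. for Cytoscape import.
    print(sorted(g.edges()))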

RESULTS: We obtained 223 active ingredients and 358 targets of HSBDF. A total of 5,555 COVID-19 disease targets related to ACE2 were extracted, and 84 compound-disease common targets were found, of which the principal targets included ACE, ESR1, ADRA1A, and HDAC1. A total of 3,946 items were identified by GO enrichment analysis, mainly related to metabolism, protein binding, cellular response to stimulus, and receptor activity. KEGG enrichment analysis screened 46 signaling pathways, including the renin-angiotensin system, renin secretion, the NF-kappa B pathway, arachidonic acid metabolism, and the AMPK signaling pathway. The molecular docking results showed that the bioactive components of HSBDF have excellent binding ability with the main proteins related to severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2).

CONCLUSIONS: HSBDF might act on SARS-CoV-2 through multiple components, targets, and pathways. Here we reveal preliminary results on the mechanism of action of HSBDF against SARS-CoV-2, providing a theoretical basis for future clinical applications.}, } @article {pmid33686319, year = {2021}, author = {Qi, Q and Tao, F and Cheng, Y and Cheng, J and Nee, AYC}, title = {New IT driven rapid manufacturing for emergency response.}, journal = {Journal of manufacturing systems}, volume = {60}, number = {}, pages = {928-935}, pmid = {33686319}, issn = {1878-6642}, abstract = {COVID-19, which is rampant around the world, has seriously disrupted people's normal work and lives. To respond to urgent public needs such as COVID-19, emergency supplies are essential. However, owing to the special requirements of such supplies, when an emergency occurs the existing reserve often cannot cope with the high demand. Given the importance of emergency supplies in public emergencies, rapid-response manufacturing of emergency supplies is a necessity. The faster emergency supplies and facilities are manufactured, the more likely the pandemic can be controlled and the more human lives are saved. Moreover, new-generation information technology represented by cloud computing, IoT, big data, AI, etc. is developing rapidly and can be widely used to address such situations. Therefore, rapid-response manufacturing enabled by New IT is presented to quickly meet emergency demands, and some policy suggestions are offered.}, } @article {pmid33681063, year = {2020}, author = {Jha, RR and Verma, RK and Kishore, A and Rana, RK and Barnwal, RK and Singh, HK and Kumar, D}, title = {Mapping fear among doctors manning screening clinics for COVID19. Results from cloud based survey in Eastern parts of India.}, journal = {Journal of family medicine and primary care}, volume = {9}, number = {12}, pages = {6194-6200}, pmid = {33681063}, issn = {2249-4863}, abstract = {BACKGROUND: As the number of COVID-19 cases caused by the novel coronavirus rises, so does the number of deaths ensuing from it. Doctors have been at the front line in these calamitous times across the world. India has comparatively few doctors, so each is overwhelmed by the number of patients to attend to. They also fear high exposure, as they often work in limited-resource settings.

METHODS: An online survey was conducted among doctors from eastern states of India to measure the reasons for their fear and to suggest possible solutions based on the results. After IEC clearance, a semi-structured anonymous questionnaire was sent via Google Forms links to known doctors working in screening OPDs or flu clinics, especially those for COVID-19.

RESULTS: Of the 59 doctors surveyed, the majority were provided with sanitizers for practicing hand hygiene. Gloves were provided everywhere, but masks, particularly N95 and triple-layer surgical masks, were not available to all. Training was not given universally. In our sample, fear was dependent on age.

CONCLUSION: Training and strict adherence to infection control measures, along with adequate resources, can help allay this fear.}, } @article {pmid33676373, year = {2021}, author = {Augustyn, DR and Wyciślik, Ł and Mrozek, D}, title = {Perspectives of using Cloud computing in integrative analysis of multi-omics data.}, journal = {Briefings in functional genomics}, volume = {20}, number = {4}, pages = {198-206}, doi = {10.1093/bfgp/elab007}, pmid = {33676373}, issn = {2041-2657}, support = {02/020/RGPL9/0184//Rector of the Silesian University of Technology, Gliwice, Poland/ ; 02/100/BK_21/0008//Statutory Research funds of Department of Applied Informatics, Silesian University of Technology, Gliwice, Poland/ ; }, mesh = {*Cloud Computing ; Data Analysis ; *Models, Theoretical ; }, abstract = {Integrative analysis of multi-omics data is usually computationally demanding. It frequently requires building complex, multi-step analysis pipelines, applying dedicated techniques for data processing and combining several data sources. These efforts lead to a better understanding of life processes, current health state or the effects of therapeutic activities. However, many omics data analysis solutions focus only on a selected problem, disease, type of data or organism. Moreover, they are implemented for general-purpose scientific computational platforms that most often do not easily scale the calculations natively. These features are not conducive to advances in understanding genotype-phenotype relationships. Fortunately, with new technological paradigms, including Cloud computing, virtualization and containerization, these functionalities can be orchestrated for easy scaling and for building independent analysis pipelines for omics data, so that solutions can be re-used for purposes for which they were not primarily designed. This paper presents perspectives on using advances in Cloud computing and the containerization approach for this purpose. We first review how the Cloud computing model is utilized in multi-omics data analysis and show weak points of the adopted solutions. Then, we introduce containerization concepts, which allow both scaling and linking of functional services designed for various purposes. Finally, using the example of the Bioconductor software package, we present a verified concept model of a universal solution that shows the potential for performing integrative analysis of multiple omics data sources.}, } @article {pmid33672768, year = {2021}, author = {Hossain, MD and Sultana, T and Hossain, MA and Hossain, MI and Huynh, LNT and Park, J and Huh, EN}, title = {Fuzzy Decision-Based Efficient Task Offloading Management Scheme in Multi-Tier MEC-Enabled Networks.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {4}, pages = {}, pmid = {33672768}, issn = {1424-8220}, support = {No.2017-0-00294//Institute for Information & Communications Technology Planning & Evaluation/ ; }, abstract = {Multi-access edge computing (MEC) is a new leading technology for meeting the demands of key performance indicators (KPIs) in 5G networks. However, in a rapidly changing dynamic environment, it is hard to find the optimal target server for processing offloaded tasks because we do not know the end users' demands in advance. Therefore, quality of service (QoS) deteriorates because of increasing task failures and long execution latency from congestion.
To reduce latency and avoid task failures from resource-constrained edge servers, vertical offloading between mobile devices with local-edge collaboration or with local edge-remote cloud collaboration has been proposed in previous studies. However, these studies ignored nearby edge servers in the same tier that have excess computing resources. Therefore, this paper introduces a fuzzy decision-based cloud-MEC collaborative task offloading management system called FTOM, which takes advantage of powerful remote cloud-computing capabilities and utilizes neighboring edge servers. The main objective of the FTOM scheme is to select the optimal target node for task offloading based on server capacity, latency sensitivity, and the network's condition. Our proposed scheme can make dynamic decisions where local or nearby MEC servers are preferred for offloading delay-sensitive tasks, and delay-tolerant high resource-demand tasks are offloaded to a remote cloud server. Simulation results affirm that our proposed FTOM scheme significantly improves the rate of successfully executing offloaded tasks by approximately 68.5%, and reduces task completion time by 66.6%, when compared with a local edge offloading (LEO) scheme. The improved and reduced rates are 32.4% and 61.5%, respectively, when compared with a two-tier edge orchestration-based offloading (TTEO) scheme. They are 8.9% and 47.9%, respectively, when compared with a fuzzy orchestration-based load balancing (FOLB) scheme, approximately 3.2% and 49.8%, respectively, when compared with a fuzzy workload orchestration-based task offloading (WOTO) scheme, and approximately 38.6% and 55%, respectively, when compared with a fuzzy edge-orchestration based collaborative task offloading (FCTO) scheme.}, } @article {pmid33671542, year = {2021}, author = {Choi, J and Ahn, S}, title = {Optimal Service Provisioning for the Scalable Fog/Edge Computing Environment.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {4}, pages = {}, pmid = {33671542}, issn = {1424-8220}, support = {NRF-2018R1D1A1B07047339//National Research Foundation of Korea/ ; }, abstract = {In recent years, we have observed the proliferation of cloud data centers (CDCs) and the Internet of Things (IoT). Cloud computing based on CDCs has the drawback of unpredictable response times due to variable delays between service requestors (IoT devices and end devices) and CDCs. This deficiency of cloud computing is especially problematic in providing IoT services with strict timing requirements and, as a result, has given rise to fog/edge computing (FEC), whose responsiveness is achieved by placing service images near service requestors. In FEC, the computing nodes located close to service requestors are called fog/edge nodes (FENs). In addition, for an FEN to execute a specific service, it has to be provisioned with the corresponding service image. Most previous work on service provisioning in the FEC environment deals with determining an appropriate FEN that satisfies requirements such as delay, CPU and storage from the perspective of one or more service requests. In this paper, we determine how to optimally place service images in consideration of pre-obtained service demands, which may be collected during a prior time interval. The proposed FEC environment is scalable in the sense that the resources of FENs are effectively utilized thanks to the optimal provisioning of services on FENs. We propose two approaches to provision service images on FENs.
In order to validate the performance of the proposed mechanisms, intensive simulations were carried out for various service demand scenarios.}, } @article {pmid33671281, year = {2021}, author = {Adnan, M and Iqbal, J and Waheed, A and Amin, NU and Zareei, M and Goudarzi, S and Umer, A}, title = {On the Design of Efficient Hierarchic Architecture for Software Defined Vehicular Networks.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {4}, pages = {}, pmid = {33671281}, issn = {1424-8220}, support = {grant ID: GGPM-2020-029 and grant ID: PP-FTSM-2020//The Ministry of Higher Education Malaysia and Universiti Kebangsaan Malaysia/ ; }, abstract = {Modern vehicles are equipped with various sensors, onboard units, and devices such as the Application Unit (AU) that support routing and communication. In VANETs, traffic management and Quality of Service (QoS) are the main research dimensions to be considered while designing VANET architectures. To cope with the QoS issues faced by VANETs, we design an efficient SDN-based architecture with a focus on the QoS of VANETs. In this paper, QoS is achieved by a priority-based scheduling algorithm in which we prioritize traffic flow messages in a safety queue and a non-safety queue. In the safety queue, messages are prioritized based on deadline and size using the New Deadline and Size of data (NDS) method with constrained location and deadline. In contrast, the non-safety queue is prioritized on a First Come First Serve (FCFS) basis. For the simulation of our proposed scheduling algorithm, we use the well-known cloud computing framework, the CloudSim toolkit. The simulation results show better performance for safety messages than for non-safety messages in terms of execution time.}, } @article {pmid33671142, year = {2021}, author = {Fang, J and Shi, J and Lu, S and Zhang, M and Ye, Z}, title = {An Efficient Computation Offloading Strategy with Mobile Edge Computing for IoT.}, journal = {Micromachines}, volume = {12}, number = {2}, pages = {}, pmid = {33671142}, issn = {2072-666X}, support = {61202076//National Natural Science Foundation of China/ ; 4192007//Beijing Natural Science Foundation/ ; }, abstract = {With the rapid development of mobile cloud computing (MCC), the Internet of Things (IoT), and artificial intelligence (AI), user equipment (UEs) is facing explosive growth. In order to effectively solve the problem that UEs may face insufficient capacity when dealing with computationally intensive and delay-sensitive applications, we take Mobile Edge Computing (MEC) of the IoT as the starting point and study the computation offloading strategy of UEs. First, we model the application generated by UEs as a directed acyclic graph (DAG) to achieve fine-grained task offloading scheduling, which makes parallel processing of tasks possible and improves execution efficiency. Then, we propose a multi-population cooperative elite algorithm (MCE-GA) based on the standard genetic algorithm, which can solve the offloading problem for tasks with dependencies in MEC to minimize the execution delay and energy consumption of applications. Experimental results show that MCE-GA has better performance compared to the baseline algorithms.
Specifically, the overhead reduction achieved by MCE-GA can be up to 72.4%, 38.6%, and 19.3% relative to the respective baseline algorithms, which demonstrates the effectiveness and reliability of MCE-GA.}, } @article {pmid33670040, year = {2021}, author = {Shang, M and Luo, J}, title = {The Tapio Decoupling Principle and Key Strategies for Changing Factors of Chinese Urban Carbon Footprint Based on Cloud Computing.}, journal = {International journal of environmental research and public health}, volume = {18}, number = {4}, pages = {}, pmid = {33670040}, issn = {1660-4601}, mesh = {Carbon/analysis ; *Carbon Footprint ; China ; Cities ; Cloud Computing ; Economic Development ; *Ecosystem ; }, abstract = {The expansion of Xi'an City has driven the consumption of energy and land resources, leading to serious environmental pollution problems. This study was therefore carried out to measure the carbon carrying capacity, net carbon footprint, and net carbon footprint pressure index of Xi'an City, and to characterize the carbon sequestration capacity of the Xi'an ecosystem, thereby laying a foundation for developing comprehensive and reasonable low-carbon development measures. This study is expected to provide a reference for China to develop a low-carbon economy through the Tapio decoupling principle. The decoupling relationship between CO2 and its driving factors was explored through the Tapio decoupling model. Time-series data were used to calculate the carbon footprint. The auto-encoder from deep learning technology was combined with a parallel algorithm from cloud computing, and a general multilayer perceptron neural network realized by a parallel BP learning algorithm was proposed based on Map-Reduce on a cloud computing cluster. A partial least squares (PLS) regression model was constructed to analyze the driving factors. The results show that, in terms of city size, the variable importance in projection (VIP) output of the urbanization rate has a strong inhibitory effect on carbon footprint growth, and the VIP value of the permanent population ranks last; in terms of economic development, the impacts of fixed asset investment and the added value of the secondary industry on the carbon footprint rank third and fourth. As a result, the marginal effect on the carbon footprint is greater than that of economic growth after economic growth reaches a certain stage, revealing that the driving forces and mechanisms can promote the growth of urban space.}, } @article {pmid33668282, year = {2021}, author = {Goyal, S and Bhushan, S and Kumar, Y and Rana, AUHS and Bhutta, MR and Ijaz, MF and Son, Y}, title = {An Optimized Framework for Energy-Resource Allocation in A Cloud Environment based on the Whale Optimization Algorithm.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {5}, pages = {}, pmid = {33668282}, issn = {1424-8220}, support = {2020R1C1C1003425//National Research Foundation of Korea/ ; }, abstract = {Cloud computing offers services to access, manipulate and configure data online over the web. The term cloud refers to an internet-based network that is remotely available and accessible at any time from anywhere. Cloud computing is undoubtedly an innovation, as the investment in real, physical infrastructure is much greater than the investment in cloud technology. The present work addresses the issue of power consumption by cloud infrastructure, as there is a need for algorithms and techniques that can reduce energy consumption and schedule resources for effective server use.
Load balancing is also a significant part of cloud technology that enables the balanced distribution of load among multiple servers to fulfill users' growing demand. The present work used various optimization algorithms, namely particle swarm optimization (PSO), cat swarm optimization (CSO), the BAT algorithm, the cuckoo search algorithm (CSA), and the whale optimization algorithm (WOA), for load balancing, energy efficiency, and better resource scheduling to create an efficient cloud environment. In both the seven-server and eight-server settings, the results revealed that the whale optimization algorithm outperformed the other algorithms in terms of response time, energy consumption, execution time, and throughput.}, } @article {pmid33664984, year = {2020}, author = {Stevens, L and Kao, D and Hall, J and Görg, C and Abdo, K and Linstead, E}, title = {ML-MEDIC: A Preliminary Study of an Interactive Visual Analysis Tool Facilitating Clinical Applications of Machine Learning for Precision Medicine.}, journal = {Applied sciences (Basel, Switzerland)}, volume = {10}, number = {9}, pages = {}, pmid = {33664984}, issn = {2076-3417}, support = {T15 LM009451/LM/NLM NIH HHS/United States ; }, abstract = {Accessible interactive tools that integrate machine learning methods with clinical research and reduce the programming experience required are needed to move science forward. Here, we present Machine Learning for Medical Exploration and Data-Inspired Care (ML-MEDIC), a point-and-click, interactive tool with a visual interface for facilitating machine learning and statistical analyses in clinical research. We deployed ML-MEDIC in the American Heart Association (AHA) Precision Medicine Platform to provide secure internet access and facilitate collaboration. ML-MEDIC's efficacy for facilitating the adoption of machine learning was evaluated through two case studies in collaboration with clinical domain experts. A domain expert review was also conducted to obtain an impression of the usability and potential limitations.}, } @article {pmid33664272, year = {2021}, author = {Shiff, S and Helman, D and Lensky, IM}, title = {Worldwide continuous gap-filled MODIS land surface temperature dataset.}, journal = {Scientific data}, volume = {8}, number = {1}, pages = {74}, pmid = {33664272}, issn = {2052-4463}, support = {203-1184-19//Ministry of Agriculture and Rural Development (Israeli Ministry of Agriculture and Rural Development)/ ; }, abstract = {Satellite land surface temperature (LST) is vital for climatological and environmental studies. However, LST datasets are not continuous in time and space, mainly due to cloud cover. Here we combine LST with Climate Forecast System Version 2 (CFSv2) modeled temperatures to derive a continuous gap-filled global LST dataset at a spatial resolution of 1 km. Temporal Fourier analysis is used to derive the seasonality (climatology) on a pixel-by-pixel basis for LST and CFSv2 temperatures. Gaps are filled by adding the CFSv2 temperature anomaly to the climatological LST. The accuracy is evaluated in nine regions across the globe using cloud-free LST (mean values: R² = 0.93, Root Mean Square Error (RMSE) = 2.7 °C, Mean Absolute Error (MAE) = 2.1 °C). The provided dataset contains day, night, and daily mean LST for the Eastern Mediterranean.
We provide Google Earth Engine code and a web app that generate gap-filled LST in any part of the world, alongside a pixel-based evaluation of the data in terms of MAE, RMSE, and Pearson's r.}, } @article {pmid33657217, year = {2021}, author = {Figueroa, CA and Aguilera, A and Chakraborty, B and Modiri, A and Aggarwal, J and Deliu, N and Sarkar, U and Jay Williams, J and Lyles, CR}, title = {Adaptive learning algorithms to optimize mobile applications for behavioral health: guidelines for design decisions.}, journal = {Journal of the American Medical Informatics Association : JAMIA}, volume = {28}, number = {6}, pages = {1225-1234}, pmid = {33657217}, issn = {1527-974X}, support = {R01 HS025429/HS/AHRQ HHS/United States ; }, mesh = {Algorithms ; Humans ; Machine Learning ; *Mobile Applications ; Reproducibility of Results ; *Telemedicine ; }, abstract = {OBJECTIVE: Providing behavioral health interventions via smartphones allows these interventions to be adapted to the changing behavior, preferences, and needs of individuals. This can be achieved through reinforcement learning (RL), a sub-area of machine learning. However, many challenges could affect the effectiveness of these algorithms in the real world. We provide guidelines for decision-making.
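Returning briefly to the gap-filled LST entry above (pmid33664272): the climatology-plus-anomaly idea can be illustrated for a single pixel with the Python sketch below; the synthetic series is a stand-in and does not reproduce the authors' data or implementation.

    import numpy as np

    days = np.arange(365)
    truth = 20 + 10 * np.sin(2 * np.pi * days / 365)          # "true" LST
    lst = truth + np.random.default_rng(1).normal(0, 1, 365)  # noisy satellite LST
    lst[100:130] = np.nan                                     # cloud-covered gap
    cfsv2 = truth + 0.5                                       # modeled temperature

    def harmonic_fit(t, y):
        # Least-squares fit of a mean plus one annual harmonic, ignoring NaNs.
        ok = ~np.isnan(y)
        X = np.column_stack([np.ones(t.size),
                             np.sin(2 * np.pi * t / 365),
                             np.cos(2 * np.pi * t / 365)])
        coef, *_ = np.linalg.lstsq(X[ok], y[ok], rcond=None)
        return X @ coef

    lst_clim = harmonic_fit(days, lst)     # LST climatology
    cfs_clim = harmonic_fit(days, cfsv2)   # modeled-temperature climatology
    # Fill gaps with climatological LST plus the modeled-temperature anomaly.
    filled = np.where(np.isnan(lst), lst_clim + (cfsv2 - cfs_clim), lst)
    print(np.nanmax(np.abs(filled[100:130] - truth[100:130])))  # error in the gap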

MATERIALS AND METHODS: Using thematic analysis, we describe challenges, considerations, and solutions for algorithm design decisions in a collaboration between health services researchers, clinicians, and data scientists. We draw on the design process of an RL algorithm for "DIAMANTE", a mobile health study for increasing physical activity in underserved patients with diabetes and depression. Over the 1.5-year project, we kept track of the research process using collaborative cloud-based Google Docs, WhatsApp Messenger, and video teleconferencing. We discussed, categorized, and coded critical challenges, grouping them into thematic process domains.

RESULTS: Nine challenges emerged, which we divided into 3 major themes: 1. Choosing the model for decision-making, including appropriate contextual and reward variables; 2. Data handling/collection, such as how to deal with missing or incorrect data in real-time; 3. Weighing the algorithm performance vs effectiveness/implementation in real-world settings.

CONCLUSION: The creation of effective behavioral health interventions does not depend only on final algorithm performance. Many real-world decisions are necessary to formulate the design of the problem parameters to which an algorithm is applied. Researchers must document and evaluate these considerations and decisions before and during the intervention period to increase transparency, accountability, and reproducibility.
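As a rough, self-contained illustration of the reinforcement learning setting discussed in the DIAMANTE entry above, the following epsilon-greedy bandit sketch adapts message choice to observed rewards; the actions, reward probabilities, and parameters are invented and do not reproduce the study's actual algorithm or variables.

    import random

    random.seed(0)
    arms = ["feedback_msg", "motivation_msg", "no_msg"]  # candidate actions
    true_p = {"feedback_msg": 0.30, "motivation_msg": 0.25, "no_msg": 0.10}
    counts = {a: 0 for a in arms}
    values = {a: 0.0 for a in arms}                      # running mean reward

    for step in range(2000):
        if random.random() < 0.1:                        # explore
            arm = random.choice(arms)
        else:                                            # exploit current best
            arm = max(arms, key=values.get)
        reward = 1.0 if random.random() < true_p[arm] else 0.0  # e.g., user was active
        counts[arm] += 1
        values[arm] += (reward - values[arm]) / counts[arm]     # incremental mean

    print(max(arms, key=values.get), {a: round(values[a], 3) for a in arms})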

TRIAL REGISTRATION: clinicaltrials.gov, NCT03490253.}, } @article {pmid33656996, year = {2022}, author = {Huang, Q and Yue, W and Yang, Y and Chen, L}, title = {P2GT: Fine-Grained Genomic Data Access Control With Privacy-Preserving Testing in Cloud Computing.}, journal = {IEEE/ACM transactions on computational biology and bioinformatics}, volume = {19}, number = {4}, pages = {2385-2398}, doi = {10.1109/TCBB.2021.3063388}, pmid = {33656996}, issn = {1557-9964}, mesh = {Algorithms ; *Cloud Computing ; Computer Security ; Genomics ; *Privacy ; }, abstract = {With the rapid development of bioinformatics and the availability of genetic sequencing technologies, genomic data has been used to facilitate personalized medicine. Cloud computing, with features such as low cost, rich storage, and rapid processing, can respond precisely to the challenges brought by the emergence of massive genomic data. Considering the security of the cloud platform and the privacy of genomic data, we first introduce P2GT, which utilizes key-policy attribute-based encryption to realize genomic data access control with unbounded attributes, and employs an equality test algorithm to achieve personalized medicine testing by matching digitized single nucleotide polymorphisms (SNPs) directly on the users' ciphertext without encrypting multiple times. We then propose an enhanced scheme P2GT+, which adopts identity-based encryption with equality test supporting flexible joint authorization to realize privacy-preserving paternity test, genetic compatibility test and disease susceptibility test over the encrypted SNPs with P2GT. We prove the security of the proposed schemes and conduct extensive experiments with the 1,000 Genomes dataset. The results show that P2GT and P2GT+ are practical and scalable enough to meet the privacy-preserving and authorized genetic testing requirements in cloud computing.}, } @article {pmid33656352, year = {2021}, author = {Elgendy, IA and Muthanna, A and Hammoudeh, M and Shaiba, H and Unal, D and Khayyat, M}, title = {Advanced Deep Learning for Resource Allocation and Security Aware Data Offloading in Industrial Mobile Edge Computing.}, journal = {Big data}, volume = {9}, number = {4}, pages = {265-278}, doi = {10.1089/big.2020.0284}, pmid = {33656352}, issn = {2167-647X}, mesh = {Algorithms ; Cloud Computing ; Computer Security ; *Deep Learning ; Resource Allocation ; }, abstract = {The Internet of Things (IoT) is permeating our daily lives through continuous environmental monitoring and data collection. The promise of low-latency communication, enhanced security, and efficient bandwidth utilization has led to the shift from mobile cloud computing to mobile edge computing. In this study, we propose an advanced deep reinforcement resource allocation and security-aware data offloading model that considers the constrained computation and radio resources of industrial IoT devices to guarantee efficient sharing of resources between multiple users. This model is formulated as an optimization problem with the goal of decreasing energy consumption and computation delay. This type of problem is non-deterministic polynomial-time hard due to the curse-of-dimensionality challenge; thus, a deep learning optimization approach is presented to find an optimal solution. In addition, a 128-bit Advanced Encryption Standard-based cryptographic approach is proposed to satisfy the data security requirements.
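The 128-bit AES protection mentioned here can be illustrated with the AES-GCM primitive from the Python cryptography package. This is a generic sketch of encrypting an offloaded payload, not the authors' exact scheme:

```python
import os
from cryptography.hazmat.primitives.ciphers.aead import AESGCM

key = AESGCM.generate_key(bit_length=128)   # 128-bit AES key, as in the paper
aesgcm = AESGCM(key)
nonce = os.urandom(12)                      # fresh 96-bit nonce per message
payload = b"sensor readings to offload"
ciphertext = aesgcm.encrypt(nonce, payload, None)
assert aesgcm.decrypt(nonce, ciphertext, None) == payload
```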
Experimental evaluation results show that the proposed model can reduce offloading overhead in terms of energy and time by up to 64.7% in comparison with the local execution approach. It also outperforms the full offloading scenario by up to 13.2%, where it can select some computation tasks to be offloaded while optimally rejecting others. Finally, it is adaptable and scalable for a large number of mobile devices.}, } @article {pmid33655263, year = {2021}, author = {Machi, D and Bhattacharya, P and Hoops, S and Chen, J and Mortveit, H and Venkatramanan, S and Lewis, B and Wilson, M and Fadikar, A and Maiden, T and Barrett, CL and Marathe, MV}, title = {Scalable Epidemiological Workflows to Support COVID-19 Planning and Response.}, journal = {medRxiv : the preprint server for health sciences}, volume = {}, number = {}, pages = {}, doi = {10.1101/2021.02.23.21252325}, pmid = {33655263}, abstract = {The COVID-19 global outbreak represents the most significant epidemic event since the 1918 influenza pandemic. Simulations have played a crucial role in supporting COVID-19 planning and response efforts. Developing scalable workflows to provide policymakers quick responses to important questions pertaining to logistics, resource allocation, epidemic forecasts and intervention analysis remains a challenging computational problem. In this work, we present scalable high performance computing-enabled workflows for COVID-19 pandemic planning and response. The scalability of our methodology allows us to run fine-grained simulations daily, and to generate county-level forecasts and other counterfactual analyses for each of the 50 states (and DC) and the 3140 counties across the USA. Our workflows use a hybrid cloud/cluster system that combines local and remote cluster computing facilities, running over 20,000 CPU cores for 6-9 hours every day to meet this objective. Our state (Virginia), the state hospital network, our university, the DOD, and the CDC use our models to guide their COVID-19 planning and response efforts. We began executing these pipelines on March 25, 2020, and have delivered and briefed weekly updates to these stakeholders for over 30 weeks without interruption.}, } @article {pmid33644298, year = {2021}, author = {Abbasi, WA and Abbas, SA and Andleeb, S and Ul Islam, G and Ajaz, SA and Arshad, K and Khalil, S and Anjam, A and Ilyas, K and Saleem, M and Chughtai, J and Abbas, A}, title = {COVIDC: An expert system to diagnose COVID-19 and predict its severity using chest CT scans: Application in radiology.}, journal = {Informatics in medicine unlocked}, volume = {23}, number = {}, pages = {100540}, pmid = {33644298}, issn = {2352-9148}, abstract = {Early diagnosis of Coronavirus disease 2019 (COVID-19) is critically important, especially in the absence or inadequate provision of a specific vaccine, to stop the surge of this lethal infection by advising quarantine. This diagnosis is challenging, as most patients with COVID-19 infection remain asymptomatic, while those showing symptoms are hard to distinguish from patients with other respiratory infections such as severe flu and pneumonia. Because wet-lab diagnostic tests for COVID-19 are costly and time-consuming, there is an urgent need for an alternative, non-invasive, rapid, and inexpensive automatic screening system. A chest CT scan can effectively be used as an alternative modality to detect and diagnose the COVID-19 infection.
In this study, we present an automatic COVID-19 diagnostic and severity prediction system called COVIDC (COVID-19 detection using CT scans) that uses deep feature maps from the chest CT scans for this purpose. Our newly proposed system not only detects COVID-19 but also predicts its severity by using a two-phase classification approach (COVID vs non-COVID, and COVID-19 severity) with deep feature maps and different shallow supervised classification algorithms such as SVMs and random forest to handle data scarcity. We performed a stringent COVIDC performance evaluation not only through 10-fold cross-validation and an external validation dataset but also in a real setting under the supervision of an experienced radiologist. In all the evaluation settings, COVIDC outperformed all the existing state-of-the-art methods designed to detect COVID-19, with an F1 score of 0.94 on the validation dataset, and justified its use to diagnose COVID-19 effectively in the real setting by correctly classifying 9 out of 10 COVID-19 CT scans. We made COVIDC openly accessible through a cloud-based webserver and Python code available at https://sites.google.com/view/wajidarshad/software and https://github.com/wajidarshad/covidc.}, } @article {pmid33643498, year = {2021}, author = {Smidt, HJ and Jokonya, O}, title = {The challenge of privacy and security when using technology to track people in times of COVID-19 pandemic.}, journal = {Procedia computer science}, volume = {181}, number = {}, pages = {1018-1026}, pmid = {33643498}, issn = {1877-0509}, abstract = {Since the start of the Coronavirus disease 2019 (COVID-19) pandemic, governments and health authorities across the world have found it very difficult to control infections. Digital technologies such as artificial intelligence (AI), big data, cloud computing, blockchain and 5G have effectively improved the efficiency of efforts in epidemic monitoring, virus tracking, prevention, control and treatment. Surveillance to halt COVID-19 has raised privacy concerns, as many governments are willing to overlook privacy implications to save lives. The purpose of this paper is to conduct a focused Systematic Literature Review (SLR), to explore the potential benefits and implications of using digital technologies such as AI, big data and cloud to track COVID-19 amongst people in different societies. The aim is to highlight the risks of security and privacy to personal data when using technology to track COVID-19 in societies and identify ways to govern these risks. The paper uses the SLR approach to examine 40 articles published during 2020, ultimately down-selecting to the most relevant 24 studies. In this SLR approach we adopted the following steps: formulating the problem, searching the literature, gathering information from studies, evaluating the quality of studies, and analysing and integrating the outcomes of studies, before concluding by interpreting the evidence and presenting the results. Papers were classified into different categories such as technology use, impact on society and governance. The study highlighted the challenge for governments of balancing what is good for public health against individual privacy and freedoms. The findings revealed that although the use of technology helps governments and health agencies reduce the spread of the COVID-19 virus, government surveillance to halt it has sparked privacy concerns.
We suggest some requirements for government policy to be ethical and capable of commanding the trust of the public and present some research questions for future research.}, } @article {pmid33633531, year = {2021}, author = {Brivio, S and Ly, DRB and Vianello, E and Spiga, S}, title = {Non-linear Memristive Synaptic Dynamics for Efficient Unsupervised Learning in Spiking Neural Networks.}, journal = {Frontiers in neuroscience}, volume = {15}, number = {}, pages = {580909}, pmid = {33633531}, issn = {1662-4548}, abstract = {Spiking neural networks (SNNs) are a computational tool in which the information is coded into spikes, as in some parts of the brain, unlike conventional neural networks (NNs), which compute over real numbers. Therefore, SNNs can implement intelligent information extraction in real-time at the edge of data acquisition and correspond to a complementary solution to conventional NNs working for cloud computing. Both NN classes face hardware constraints due to limited computing parallelism and separation of logic and memory. Emerging memory devices, like resistive switching memories, phase change memories, or memristive devices in general, are strong candidates to remove these hurdles for NN applications. The well-established training procedures of conventional NNs helped in defining the desiderata for memristive device dynamics implementing synaptic units. The generally agreed requirements are a linear evolution of memristive conductance upon stimulation with a train of identical pulses and a symmetric conductance change for conductance increase and decrease. Conversely, little work has been done to understand the main properties of memristive devices supporting efficient SNN operation. The reason lies in the lack of a background theory for their training. As a consequence, requirements for NNs have been taken as a reference to develop memristive devices for SNNs. In the present work, we show that, for efficient CMOS/memristive SNNs, the requirements for synaptic memristive dynamics are very different from the needs of a conventional NN. System-level simulations of an SNN trained to classify hand-written digit images through a spike timing dependent plasticity protocol are performed considering various linear and non-linear plausible synaptic memristive dynamics. We consider memristive dynamics bounded by artificial hard conductance values and limited by the natural dynamics evolution toward asymptotic values (soft-boundaries). We quantitatively analyze the impact of resolution and non-linearity properties of the synapses on the network training and classification performance. Finally, we demonstrate that the non-linear synapses with hard boundary values enable higher classification performance and realize the best trade-off between classification accuracy and required training time.
With reference to the obtained results, we discuss how memristive devices with non-linear dynamics constitute a technologically convenient solution for the development of online SNN training.}, } @article {pmid33625229, year = {2021}, author = {Bai, J and Bandla, C and Guo, J and Vera Alvarez, R and Bai, M and Vizcaíno, JA and Moreno, P and Grüning, B and Sallou, O and Perez-Riverol, Y}, title = {BioContainers Registry: Searching Bioinformatics and Proteomics Tools, Packages, and Containers.}, journal = {Journal of proteome research}, volume = {20}, number = {4}, pages = {2056-2061}, pmid = {33625229}, issn = {1535-3907}, support = {/WT_/Wellcome Trust/United Kingdom ; 208391/WT_/Wellcome Trust/United Kingdom ; 208391/Z/17/Z/WT_/Wellcome Trust/United Kingdom ; }, mesh = {*Computational Biology ; *Proteomics ; Registries ; Reproducibility of Results ; Software ; }, abstract = {BioContainers is an open-source project that aims to create, store, and distribute bioinformatics software containers and packages. The BioContainers community has developed a set of guidelines to standardize software containers, including the metadata, versions, licenses, and software dependencies. BioContainers supports multiple packaging and container technologies such as Conda, Docker, and Singularity. BioContainers provides over 9000 bioinformatics tools, including more than 200 proteomics and mass spectrometry tools. Here we introduce the BioContainers Registry and RESTful API to make containerized bioinformatics tools more findable, accessible, interoperable, and reusable (FAIR). The BioContainers Registry provides a fast and convenient way to find and retrieve bioinformatics tool packages and containers. By doing so, it will increase the use of bioinformatics packages and containers while promoting replicability and reproducibility in research.}, } @article {pmid33621175, year = {2021}, author = {Katakol, S and Elbarashy, B and Herranz, L and van de Weijer, J and Lopez, AM}, title = {Distributed Learning and Inference With Compressed Images.}, journal = {IEEE transactions on image processing : a publication of the IEEE Signal Processing Society}, volume = {30}, number = {}, pages = {3069-3083}, doi = {10.1109/TIP.2021.3058545}, pmid = {33621175}, issn = {1941-0042}, abstract = {Modern computer vision requires processing large amounts of data, both while training the model and during inference, once the model is deployed. Scenarios where images are captured and processed in physically separated locations are increasingly common (e.g. autonomous vehicles, cloud computing, smartphones). In addition, many devices suffer from limited resources to store or transmit data (e.g. storage space, channel capacity). In these scenarios, lossy image compression plays a crucial role to effectively increase the number of images collected under such constraints. However, lossy compression entails some undesired degradation of the data that may harm the performance of the downstream analysis task at hand, since important semantic information may be lost in the process. Moreover, we may only have compressed images at training time but are able to use original images at inference time (i.e. test), or vice versa, and in such a case, the downstream model suffers from covariate shift. In this paper, we analyze this phenomenon, with a special focus on vision-based perception for autonomous driving as a paradigmatic scenario.
We see that loss of semantic information and covariate shift do indeed exist, resulting in a drop in performance that depends on the compression rate. In order to address the problem, we propose dataset restoration, based on image restoration with generative adversarial networks (GANs). Our method is agnostic to both the particular image compression method and the downstream task, and has the advantage of not adding cost to the deployed models, which is particularly important in resource-limited devices. The presented experiments focus on semantic segmentation as a challenging use case, cover a broad range of compression rates and diverse datasets, and show how our method is able to significantly alleviate the negative effects of compression on the downstream visual task.}, } @article {pmid33611874, year = {2021}, author = {Seong, Y and You, SC and Ostropolets, A and Rho, Y and Park, J and Cho, J and Dymshyts, D and Reich, CG and Heo, Y and Park, RW}, title = {Incorporation of Korean Electronic Data Interchange Vocabulary into Observational Medical Outcomes Partnership Vocabulary.}, journal = {Healthcare informatics research}, volume = {27}, number = {1}, pages = {29-38}, pmid = {33611874}, issn = {2093-3681}, support = {//Health Insurance Review and Assessment Service/ ; }, abstract = {OBJECTIVES: We incorporated the Korean Electronic Data Interchange (EDI) vocabulary into the Observational Medical Outcomes Partnership (OMOP) vocabulary using a semi-automated process. The goal of this study was to improve the Korean EDI as a standard medical ontology in Korea.

METHODS: We incorporated the EDI vocabulary into the OMOP vocabulary through four main steps. First, we improved the current classification of EDI domains and separated medical services into procedures and measurements. Second, each EDI concept was assigned a unique identifier and validity dates. Third, we built a vertical hierarchy between EDI concepts, fully describing child concepts through relationships and attributes and linking them to parent terms. Finally, we added an English definition for each EDI concept. We translated the Korean definitions of EDI concepts with Google.Cloud.Translation.V3 via a client library, supplemented by manual translation. We evaluated the EDI using 11 auditing criteria for controlled vocabularies.
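The machine translation step can be sketched with the google-cloud-translate Python client library; a minimal example in which the project ID is a placeholder and credentials are assumed to come from the environment:

```python
from google.cloud import translate_v3 as translate

def translate_ko_to_en(texts, project_id="my-gcp-project"):
    # project_id is a placeholder; authentication uses ambient credentials
    client = translate.TranslationServiceClient()
    response = client.translate_text(
        parent=f"projects/{project_id}/locations/global",
        contents=texts,
        mime_type="text/plain",
        source_language_code="ko",
        target_language_code="en",
    )
    return [t.translated_text for t in response.translations]
```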

RESULTS: We incorporated 313,431 concepts from the EDI into the OMOP Standardized Vocabularies. For 10 of the 11 auditing criteria, the EDI showed a better quality index within the OMOP vocabulary than in the original EDI vocabulary.

CONCLUSIONS: The incorporation of the EDI vocabulary into the OMOP Standardized Vocabularies allows better standardization to facilitate network research. Our research provides a promising model for mapping Korean medical information into a global standard terminology system, although a comprehensive mapping of official vocabulary remains to be done in the future.}, } @article {pmid33602102, year = {2022}, author = {Rao, PMM and Singh, SK and Khamparia, A and Bhushan, B and Podder, P}, title = {Multi-Class Breast Cancer Classification Using Ensemble of Pretrained models and Transfer Learning.}, journal = {Current medical imaging}, volume = {18}, number = {4}, pages = {409-416}, doi = {10.2174/1573405617666210218101418}, pmid = {33602102}, issn = {1573-4056}, mesh = {Breast ; *Breast Neoplasms/diagnostic imaging ; Female ; Humans ; Machine Learning ; Neural Networks, Computer ; }, abstract = {AIMS: Early detection of breast cancer has prevented many deaths. Earlier CAD systems served as a second opinion for radiologists and clinicians. Machine learning and deep learning have brought tremendous changes in medical diagnosis and imaging.

BACKGROUND: Breast cancer is the most commonly occurring cancer in women and the second most common cancer overall. According to 2018 statistics, there were over 2 million cases worldwide. Belgium and Luxembourg have the highest rates of cancer.

OBJECTIVE: A method for breast cancer detection is proposed using ensemble learning. 2-class and 8-class classification are performed.

METHODS: To deal with imbalanced classification, the authors propose an ensemble of pretrained models.
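A common way to realize such an ensemble is to average the softmax outputs of several pretrained backbones. The Keras sketch below is illustrative; the specific backbones and head are assumptions, not the authors' exact configuration:

```python
import tensorflow as tf

def build_ensemble(input_shape=(224, 224, 3), n_classes=8):
    """Average the softmax outputs of frozen pretrained backbones."""
    inputs = tf.keras.Input(shape=input_shape)
    backbones = [
        tf.keras.applications.ResNet50(include_top=False, pooling="avg",
                                       weights="imagenet"),
        tf.keras.applications.DenseNet121(include_top=False, pooling="avg",
                                          weights="imagenet"),
    ]
    heads = []
    for net in backbones:
        net.trainable = False   # transfer learning: keep pretrained features
        heads.append(
            tf.keras.layers.Dense(n_classes, activation="softmax")(net(inputs)))
    return tf.keras.Model(inputs, tf.keras.layers.Average()(heads))
```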

RESULTS: 98.5% training accuracy and 89% test accuracy are achieved on 8-class classification. Moreover, 99.1% training and 98% test accuracy are achieved on 2-class classification.

CONCLUSION: It is found that misclassifications are high in class DC compared with the other classes; this is due to the imbalance in the dataset. In the future, one can increase the size of the datasets or use different methods. To implement this research work, the authors used two Nvidia Tesla V100 GPUs on the Google Cloud Platform.}, } @article {pmid33600344, year = {2021}, author = {R Niakan Kalhori, S and Bahaadinbeigy, K and Deldar, K and Gholamzadeh, M and Hajesmaeel-Gohari, S and Ayyoubzadeh, SM}, title = {Digital Health Solutions to Control the COVID-19 Pandemic in Countries With High Disease Prevalence: Literature Review.}, journal = {Journal of medical Internet research}, volume = {23}, number = {3}, pages = {e19473}, pmid = {33600344}, issn = {1438-8871}, mesh = {Humans ; *COVID-19/epidemiology/prevention & control ; *Infection Control/methods ; Information Technology/standards ; *Pandemics/prevention & control ; Prevalence ; SARS-CoV-2/isolation & purification ; *Telemedicine/organization & administration ; }, abstract = {BACKGROUND: COVID-19, the disease caused by the novel coronavirus SARS-CoV-2, has become a global pandemic, affecting most countries worldwide. Digital health information technologies can be applied in three aspects, namely digital patients, digital devices, and digital clinics, and could be useful in fighting the COVID-19 pandemic.

OBJECTIVE: Recent reviews have examined the role of digital health in controlling COVID-19 to identify the potential of digital health interventions to fight the disease. However, this study aims to review and analyze the digital technology that is being applied to control the COVID-19 pandemic in the 10 countries with the highest prevalence of the disease.

METHODS: For this review, the Google Scholar, PubMed, Web of Science, and Scopus databases were searched in August 2020 to retrieve publications from December 2019 to March 15, 2020. Furthermore, the Google search engine was used to identify additional applications of digital health for COVID-19 pandemic control.

RESULTS: We included 32 papers in this review that reported 37 digital health applications for COVID-19 control. The most common digital health projects to address COVID-19 were telemedicine visits (11/37, 30%). Digital learning packages for informing people about the disease, geographic information systems and quick response code applications for real-time case tracking, and cloud- or mobile-based systems for self-care and patient tracking were in the second rank of digital tool applications (all 7/37, 19%). The projects were deployed in various European countries and in the United States, Australia, and China.

CONCLUSIONS: Considering the potential of available information technologies worldwide in the 21st century, particularly in developed countries, it appears that more digital health products with a higher level of intelligence capability remain to be applied for the management of pandemics and health-related crises.}, } @article {pmid33591447, year = {2022}, author = {Jheng, YC and Wang, YP and Lin, HE and Sung, KY and Chu, YC and Wang, HS and Jiang, JK and Hou, MC and Lee, FY and Lu, CL}, title = {A novel machine learning-based algorithm to identify and classify lesions and anatomical landmarks in colonoscopy images.}, journal = {Surgical endoscopy}, volume = {36}, number = {1}, pages = {640-650}, pmid = {33591447}, issn = {1432-2218}, mesh = {Algorithms ; *Artificial Intelligence ; *Colonic Polyps/diagnostic imaging ; Colonoscopy/methods ; Humans ; Machine Learning ; }, abstract = {OBJECTIVES: Computer-aided diagnosis (CAD)-based artificial intelligence (AI) has been shown to be highly accurate for detecting and characterizing colon polyps. However, the application of AI to identify normal colon landmarks and differentiate multiple colon diseases has not yet been established. We aimed to develop a convolutional neural network (CNN)-based algorithm (GUTAID) to recognize different colon lesions and anatomical landmarks.

METHODS: Colonoscopic images were obtained to train and validate the AI classifiers. An independent dataset was collected for verification. The architecture of GUTAID contains two major sub-models: the Normal, Polyp, Diverticulum, Cecum and CAncer (NPDCCA) and Narrow-Band Imaging for Adenomatous/Hyperplastic polyps (NBI-AH) models. The development of GUTAID was based on the 16-layer Visual Geometry Group (VGG16) architecture and implemented on Google Cloud Platform.
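A generic VGG16 transfer-learning setup of the kind described can be sketched in Keras; the five-class head matches the NPDCCA categories, but the layer sizes are illustrative assumptions, not the published GUTAID configuration:

```python
import tensorflow as tf

def build_vgg16_classifier(n_classes=5, input_shape=(224, 224, 3)):
    """VGG16 backbone with a new classification head (five NPDCCA classes)."""
    base = tf.keras.applications.VGG16(include_top=False, weights="imagenet",
                                       input_shape=input_shape)
    base.trainable = False                    # keep ImageNet features frozen
    model = tf.keras.Sequential([
        base,
        tf.keras.layers.Flatten(),
        tf.keras.layers.Dense(256, activation="relu"),
        tf.keras.layers.Dense(n_classes, activation="softmax"),
    ])
    model.compile(optimizer="adam", loss="categorical_crossentropy",
                  metrics=["accuracy"])
    return model
```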

RESULTS: In total, 7838 colonoscopy images were used for developing and validating the AI model. An additional 1273 images were independently applied to verify the GUTAID. The accuracy for GUTAID in detecting various colon lesions/landmarks is 93.3% for polyps, 93.9% for diverticula, 91.7% for cecum, 97.5% for cancer, and 83.5% for adenomatous/hyperplastic polyps.

CONCLUSIONS: A CNN-based algorithm (GUTAID) to identify colonic abnormalities and landmarks was successfully established with high accuracy. This GUTAID system can further characterize polyps for optical diagnosis. We demonstrated that AI classification methodology is feasible for identifying multiple, different colon diseases.}, } @article {pmid33583322, year = {2021}, author = {Hacking, S and Bijol, V}, title = {Deep learning for the classification of medical kidney disease: a pilot study for electron microscopy.}, journal = {Ultrastructural pathology}, volume = {45}, number = {2}, pages = {118-127}, doi = {10.1080/01913123.2021.1882628}, pmid = {33583322}, issn = {1521-0758}, mesh = {Artificial Intelligence ; *Deep Learning ; Humans ; *Kidney Diseases/diagnosis ; Microscopy, Electron ; Pilot Projects ; }, abstract = {Artificial intelligence (AI) is a new frontier and often enigmatic for medical professionals. Cloud computing could open up the field of computer vision to a wider medical audience, and deep learning on the cloud allows one to design, develop, train and deploy applications with ease. In the field of histopathology, the implementation of various applications in AI has been successful for whole slide images rich in biological diversity. However, the analysis of other tissue media, including electron microscopy, is yet to be explored. The present study aims to evaluate deep learning for the classification of medical kidney disease on electron microscopy images: amyloidosis, diabetic glomerulosclerosis, membranous nephropathy, membranoproliferative glomerulonephritis (MPGN), and thin basement membrane disease (TBMD). We found good overall classification with the MedKidneyEM-v1 Classifier; when looking at normal and diseased kidneys, the average area under the curve for precision and recall was 0.841. The average area under the curve for precision and recall on the disease-only cohort was 0.909. Digital pathology will shape a new era for medical kidney disease, and the present study demonstrates the feasibility of deep learning for electron microscopy. Future approaches could be used by renal pathologists to improve diagnostic concordance, determine therapeutic strategies, and optimize patient outcomes in a true clinical environment.}, } @article {pmid33572132, year = {2021}, author = {Tradacete, M and Santos, C and Jiménez, JA and Rodríguez, FJ and Martín, P and Santiso, E and Gayo, M}, title = {Turning Base Transceiver Stations into Scalable and Controllable DC Microgrids Based on a Smart Sensing Strategy.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {4}, pages = {}, pmid = {33572132}, issn = {1424-8220}, support = {RTC-2017-6231-3//Ministerio de Ciencia, Innovación y Universidades/ ; P2018/EMT-4366//Dirección General de Universidades e Investigación/ ; }, abstract = {This paper describes a practical approach to the transformation of Base Transceiver Stations (BTSs) into scalable and controllable DC Microgrids in which an energy management system (EMS) is developed to maximize the economic benefit. The EMS strategy focuses on efficiently managing a Battery Energy Storage System (BESS) along with photovoltaic (PV) energy generation, and non-critical load-shedding. The EMS collects data such as real-time energy consumption and generation, and environmental parameters such as temperature, wind speed and irradiance, using a smart sensing strategy whereby measurements can be recorded and computing can be performed both locally and in the cloud.
Within the Spanish electricity market, applying two-tariff pricing, annual savings of 16.8 euros per kW of installed battery power are achieved. The system has the advantage that it can be applied to both new and existing installations, providing a two-way connection to the electricity grid, PV generation, smart measurement systems and the necessary management software. All these functions are integrated in a flexible, low-cost HW/SW architecture. Finally, the whole system is validated through real tests carried out on a pilot plant and under different weather conditions.}, } @article {pmid33569265, year = {2021}, author = {St-Onge, C and Benmakrelouf, S and Kara, N and Tout, H and Edstrom, C and Rabipour, R}, title = {Generic SDE and GA-based workload modeling for cloud systems.}, journal = {Journal of cloud computing (Heidelberg, Germany)}, volume = {10}, number = {1}, pages = {6}, pmid = {33569265}, issn = {2192-113X}, abstract = {Workload models are typically built based on user and application behavior in a system, limiting them to specific domains. Undoubtedly, such a practice creates a dilemma in a cloud computing (cloud) environment, where a wide range of heterogeneous applications are running and many users have access to these resources. The workload model in such an infrastructure must adapt to the evolution of the system configuration parameters, such as job load fluctuation. The aim of this work is to propose an approach that generates generic workload models that (1) are independent of user behavior and the applications running in the system and can fit any workload domain and type, (2) model sharp workload variations that are most likely to appear in cloud environments, and (3) achieve a high degree of fidelity with respect to observed data within a short execution time. We propose two approaches for workload estimation, the first being a Hull-White and Genetic Algorithm (GA) combination, while the second is a Support Vector Regression (SVR) and Kalman-filter combination. Thorough experiments are conducted on real CPU and throughput datasets from virtualized IP Multimedia Subsystem (IMS), Web and cloud environments to study the efficiency of both propositions. The results show a higher accuracy for the Hull-White-GA approach with marginal overhead over the SVR-Kalman-Filter combination.}, } @article {pmid33568057, year = {2021}, author = {Gangiredla, J and Rand, H and Benisatto, D and Payne, J and Strittmatter, C and Sanders, J and Wolfgang, WJ and Libuit, K and Herrick, JB and Prarat, M and Toro, M and Farrell, T and Strain, E}, title = {GalaxyTrakr: a distributed analysis tool for public health whole genome sequence data accessible to non-bioinformaticians.}, journal = {BMC genomics}, volume = {22}, number = {1}, pages = {114}, pmid = {33568057}, issn = {1471-2164}, mesh = {Computational Biology ; High-Throughput Nucleotide Sequencing ; Humans ; *Metagenomics ; *Public Health ; Whole Genome Sequencing ; }, abstract = {BACKGROUND: Processing and analyzing whole genome sequencing (WGS) data is computationally intensive: a single Illumina MiSeq WGS run produces ~ 1 million 250-base-pair reads for each of 24 samples. This poses significant obstacles for smaller laboratories, or laboratories not affiliated with larger projects, which may not have dedicated bioinformatics staff or computing power to effectively use genomic data to protect public health.
Building on the success of the cloud-based Galaxy bioinformatics platform (http://galaxyproject.org), already known for its user-friendliness and powerful WGS analytical tools, the Center for Food Safety and Applied Nutrition (CFSAN) at the U.S. Food and Drug Administration (FDA) created a customized 'instance' of the Galaxy environment, called GalaxyTrakr (https://www.galaxytrakr.org), for use by laboratory scientists performing food-safety regulatory research. The goal was to enable laboratories outside of the FDA internal network to (1) perform quality assessments of sequence data, (2) identify links between clinical isolates and positive food/environmental samples, including those at the National Center for Biotechnology Information sequence read archive (https://www.ncbi.nlm.nih.gov/sra/), and (3) explore new methodologies such as metagenomics. GalaxyTrakr hosts a variety of free and adaptable tools and provides the data storage and computing power to run the tools. These tools support coordinated analytic methods and consistent interpretation of results across laboratories. Users can create and share tools for their specific needs and use sequence data generated locally and elsewhere.
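Galaxy servers of this kind expose a scriptable API. The sketch below uses the BioBlend client library to create a history and upload a read file; the API key and file name are placeholders, and this is an illustration rather than official GalaxyTrakr documentation:

```python
from bioblend.galaxy import GalaxyInstance

# Connect to a Galaxy server such as GalaxyTrakr (the API key is a placeholder)
gi = GalaxyInstance(url="https://www.galaxytrakr.org", key="YOUR_API_KEY")

# Create a working history and upload a FASTQ file into it
history = gi.histories.create_history(name="wgs-qc-run")
uploaded = gi.tools.upload_file("sample_R1.fastq.gz", history["id"])
print(uploaded)
```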

RESULTS: In its first full year (2018), GalaxyTrakr processed over 85,000 jobs and went from 25 to 250 users, representing 53 different public and state health laboratories, academic institutions, international health laboratories, and federal organizations. By mid-2020, it had grown to 600 registered users and had processed over 450,000 analytical jobs. To illustrate how laboratories are making use of this resource, we describe how six institutions use GalaxyTrakr to quickly analyze and review their data. Instructions for participating in GalaxyTrakr are provided.

CONCLUSIONS: GalaxyTrakr advances food safety by providing reliable and harmonized WGS analyses for public health laboratories and promoting collaboration across laboratories with differing resources. Anticipated enhancements to this resource will include workflows for additional foodborne pathogens, viruses, and parasites, as well as new tools and services.}, } @article {pmid33558984, year = {2021}, author = {Kumar, R and Al-Turjman, F and Anand, L and Kumar, A and Magesh, S and Vengatesan, K and Sitharthan, R and Rajesh, M}, title = {Genomic sequence analysis of lung infections using artificial intelligence technique.}, journal = {Interdisciplinary sciences, computational life sciences}, volume = {13}, number = {2}, pages = {192-200}, pmid = {33558984}, issn = {1867-1462}, mesh = {Genomics ; Lung ; Sequence Analysis ; *Support Vector Machine ; }, abstract = {With the modernization of Artificial Intelligence (AI) techniques in healthcare services, various methods have emerged, including Support Vector Machines (SVM) and deep learning; for example, Convolutional Neural Networks (CNN) have played a significant role in various classification analyses of lung cancer and other diseases. In this paper, Parallel-based SVM (P-SVM) and IoT have been utilized to examine the ideal classification of lung infections caused by genomic sequences. The proposed method develops a new methodology to find the ideal characterization of lung diseases and to detect their growth in the early stages, in order to control that growth and prevent lung disease. Further, in this investigation, the P-SVM algorithm has been developed for classifying high-dimensional, diverse lung disease datasets. The data used in the assessment were fetched in real time through the cloud and IoT. The obtained outcome demonstrates that the developed P-SVM algorithm achieves higher accuracy (83%) and precision (88%) in classification with ideal datasets when compared with other learning methods.}, } @article {pmid33557230, year = {2021}, author = {Jensen, JN and Hannemose, M and Bærentzen, JA and Wilm, J and Frisvad, JR and Dahl, AB}, title = {Surface Reconstruction from Structured Light Images Using Differentiable Rendering.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {4}, pages = {}, pmid = {33557230}, issn = {1424-8220}, support = {8057-00011B//Innovationsfonden/ ; }, abstract = {When 3D scanning objects, the objective is usually to obtain a continuous surface. However, most surface scanning methods, such as structured light scanning, yield a point cloud. Obtaining a continuous surface from a point cloud requires a subsequent surface reconstruction step, which is directly affected by any error from the computation of the point cloud. In this work, we propose a one-step approach in which we compute the surface directly from structured light images. Our method minimizes the least-squares error between photographs and renderings of a triangle mesh, where the vertex positions of the mesh are the parameters of the minimization problem. To ensure fast iterations during optimization, we use differentiable rendering, which computes images and gradients in a single pass. We present simulation experiments demonstrating that our method for computing a triangle mesh has several advantages over approaches that rely on an intermediate point cloud. Our method can produce accurate reconstructions when initializing the optimization from a sphere.
We also show that our method is good at reconstructing sharp edges and that it is robust with respect to image noise. In addition, our method can improve the output from other reconstruction algorithms if we use these for initialization.}, } @article {pmid33557132, year = {2021}, author = {Lahoura, V and Singh, H and Aggarwal, A and Sharma, B and Mohammed, MA and Damaševičius, R and Kadry, S and Cengiz, K}, title = {Cloud Computing-Based Framework for Breast Cancer Diagnosis Using Extreme Learning Machine.}, journal = {Diagnostics (Basel, Switzerland)}, volume = {11}, number = {2}, pages = {}, pmid = {33557132}, issn = {2075-4418}, abstract = {Globally, breast cancer is one of the most significant causes of death among women. Early detection accompanied by prompt treatment can reduce the risk of death due to breast cancer. Currently, machine learning in cloud computing plays a pivotal role in disease diagnosis, but predominantly among the people living in remote areas where medical facilities are scarce. Diagnosis systems based on machine learning act as secondary readers and assist radiologists in the proper diagnosis of diseases, whereas cloud-based systems can support telehealth services and remote diagnostics. Techniques based on artificial neural networks (ANN) have attracted many researchers to explore their capability for disease diagnosis. Extreme learning machine (ELM) is one of the variants of ANN that has a huge potential for solving various classification problems. The framework proposed in this paper amalgamates three research domains: Firstly, ELM is applied for the diagnosis of breast cancer. Secondly, to eliminate insignificant features, the gain ratio feature selection method is employed. Lastly, a cloud computing-based system for remote diagnosis of breast cancer using ELM is proposed. The performance of the cloud-based ELM is compared with some state-of-the-art technologies for disease diagnosis. The results achieved on the Wisconsin Diagnostic Breast Cancer (WBCD) dataset indicate that the cloud-based ELM technique outperforms other results. The best performance results of ELM were found in both the standalone and cloud environments, which were compared. The important findings of the experimental results indicate that the accuracy achieved is 0.9868, the recall is 0.9130, the precision is 0.9054, and the F1-score is 0.8129.}, } @article {pmid33552932, year = {2021}, author = {Ahmad, S and Mehfuz, S and Beg, J and Ahmad Khan, N and Husain Khan, A}, title = {Fuzzy Cloud Based COVID-19 Diagnosis Assistant for identifying affected cases globally using MCDM.}, journal = {Materials today. Proceedings}, volume = {}, number = {}, pages = {}, doi = {10.1016/j.matpr.2021.01.240}, pmid = {33552932}, issn = {2214-7853}, abstract = {COVID-19, Coronavirus Disease 2019, emerged as a hazardous disease that has led to many casualties across the world. Early detection of COVID-19 in patients and proper treatment along with awareness can help to contain COVID-19. The proposed Fuzzy Cloud-Based (FCB) COVID-19 Diagnosis Assistant (DA) aims to identify patients as confirmed, suspected, or suspicious cases of COVID-19. It categorizes patients into four categories: mild, moderate, severe, or critical. As patients register themselves online with the FCB COVID-19 DA in real time, it builds the corresponding database. This database helps to improve diagnostic accuracy, as it contains the latest updates from real-world case data.
A team of doctors, experts, and consultants is integrated with the FCB COVID-19 DA for better consultation and prevention. The ultimate aim of the proposed FCB COVID-19 DA is to help control the COVID-19 pandemic and decelerate its rate of transmission in society.}, } @article {pmid33546394, year = {2021}, author = {Alsharif, M and Rawat, DB}, title = {Study of Machine Learning for Cloud Assisted IoT Security as a Service.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {4}, pages = {}, pmid = {33546394}, issn = {1424-8220}, support = {001-2020//Data Science and Cybersecurity Center/ ; }, abstract = {Machine learning (ML) has been emerging as a viable solution for intrusion detection systems (IDS) to secure IoT devices against different types of attacks. ML based IDS (ML-IDS) normally detect network traffic anomalies caused by known attacks as well as newly introduced attacks. Recent research focuses on the functionality metrics of ML techniques, depicting their prediction effectiveness, but has overlooked their operational requirements. ML techniques are resource-demanding and require careful adaptation to fit the limited computing resources of a large sector of their operational platform, namely, embedded systems. In this paper, we propose a cloud-based service architecture for managing ML models that best fit different IoT device operational configurations for security. An IoT device may benefit from such a service by offloading to the cloud heavyweight activities such as feature selection, model building, training, and validation, thus reducing the IDS maintenance workload at the IoT device and getting the security model back from the cloud as a service.}, } @article {pmid33546287, year = {2021}, author = {Meyer, H and Wei, P and Jiang, X}, title = {Intelligent Video Highlights Generation with Front-Camera Emotion Sensing.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {4}, pages = {}, pmid = {33546287}, issn = {1424-8220}, support = {CNS-1815274//National Science Foundation/ ; CNS-1704899//National Science Foundation/ ; CNS-11943396//National Science Foundation/ ; CNS-1837022//National Science Foundation/ ; }, abstract = {In this paper, we present HOMER, a cloud-based system for video highlight generation which enables the automated, relevant, and flexible segmentation of videos. Our system outperforms state-of-the-art solutions by fusing internal video content-based features with the user's emotion data. While current research mainly focuses on creating video summaries without the use of affective data, our solution achieves the subjective task of detecting highlights by leveraging human emotions. In two separate experiments, covering videos filmed with a dual-camera setup and home videos randomly picked from Microsoft's Video Titles in the Wild (VTW) dataset, HOMER demonstrates an improvement of up to 38% in F1-score over baseline, while not requiring any external hardware.
We demonstrated both the portability and scalability of HOMER through the implementation of two smartphone applications.}, } @article {pmid33545122, year = {2021}, author = {Alshehri, M and Bhardwaj, A and Kumar, M and Mishra, S and Gyani, J}, title = {Cloud and IoT based smart architecture for desalination water treatment.}, journal = {Environmental research}, volume = {195}, number = {}, pages = {110812}, doi = {10.1016/j.envres.2021.110812}, pmid = {33545122}, issn = {1096-0953}, mesh = {Models, Theoretical ; Seawater ; *Solar Energy ; Sunlight ; *Water Purification ; }, abstract = {Increasing water demand and the deteriorating environment have continuously stressed the requirement for new technology and methods to attain optimized use of resources and desalination management, converting seawater into pure drinking water. In this age, use of the Internet of Things allows us to optimize a series of processes that were previously complicated to perform and required enormous resources. One of these is optimizing the management of water treatment. This research presents an implementable water treatment model and suggests a smart environment that can control water treatment plants. The proposed system gathers and analyses data to provide the most efficient approach for water desalination operations. The desalination framework integrates smart enabling technologies, such as a cloud portal, network communication, the Internet of Things, and sensors powered by solar energy, with ancient water purification methods as part of the seawater desalination project. The proposed framework incorporates the new-age technologies, which are essential for efficient and effective operation of desalination systems. The implemented dual-membrane desalination framework uses solar energy for purifying saline water using ancient methods to produce clean water for drinking and irrigation. The desalination produced 0.47 m3/l of freshwater from a saline concentration of 10 g/l, consuming 8.31 KWh/m3 energy for production from the prototype implementation, which makes the desalination process cost-effective.}, } @article {pmid33544692, year = {2021}, author = {Vahidy, F and Jones, SL and Tano, ME and Nicolas, JC and Khan, OA and Meeks, JR and Pan, AP and Menser, T and Sasangohar, F and Naufal, G and Sostman, D and Nasir, K and Kash, BA}, title = {Rapid Response to Drive COVID-19 Research in a Learning Health Care System: Rationale and Design of the Houston Methodist COVID-19 Surveillance and Outcomes Registry (CURATOR).}, journal = {JMIR medical informatics}, volume = {9}, number = {2}, pages = {e26773}, pmid = {33544692}, issn = {2291-9694}, abstract = {BACKGROUND: The COVID-19 pandemic has exacerbated the challenges of meaningful health care digitization. The need for rapid yet validated decision-making requires robust data infrastructure. Organizations with a focus on learning health care (LHC) systems tend to adapt better to rapidly evolving data needs. Few studies have demonstrated a successful implementation of data digitization principles in an LHC context across health care systems during the COVID-19 pandemic.

OBJECTIVE: We share our experience and provide a framework for assembling and organizing multidisciplinary resources, structuring and regulating research needs, and developing a single source of truth (SSoT) for COVID-19 research by applying fundamental principles of health care digitization, in the context of LHC systems across a complex health care organization.

METHODS: Houston Methodist (HM) comprises eight tertiary care hospitals and an expansive primary care network across Greater Houston, Texas. During the early phase of the pandemic, institutional leadership envisioned the need to streamline COVID-19 research and established the retrospective research task force (RRTF). We describe an account of the structure, functioning, and productivity of the RRTF. We further elucidate the technical and structural details of a comprehensive data repository-the HM COVID-19 Surveillance and Outcomes Registry (CURATOR). We particularly highlight how CURATOR conforms to standard health care digitization principles in the LHC context.
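The extract-transform-load pattern described here can be sketched in miniature with pandas and SQLite; every file, table, and column name below is hypothetical, not the registry's actual schema:

```python
import sqlite3
import pandas as pd

con = sqlite3.connect("registry_demo.db")

# Extract: read a (hypothetical) EHR export
ehr = pd.read_csv("ehr_export.csv", parse_dates=["test_date"])

# Transform: normalize the test result into a binary flag
ehr["positive"] = ehr["result"].str.upper().map({"POS": 1, "NEG": 0})

# Load: append into the registry table, one row per test
ehr.to_sql("covid_tests", con, if_exists="append", index=False)
```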

RESULTS: The HM COVID-19 RRTF comprises expertise in epidemiology, health systems, clinical domains, data sciences, information technology, and research regulation. The RRTF initially convened in March 2020 to prioritize and streamline COVID-19 observational research; to date, it has reviewed over 60 protocols and made recommendations to the institutional review board (IRB). The RRTF also established the charter for CURATOR, which in itself was IRB-approved in April 2020. CURATOR is a relational structured query language database that is directly populated with data from electronic health records, via largely automated extract, transform, and load procedures. The CURATOR design enables longitudinal tracking of COVID-19 cases and controls before and after COVID-19 testing. CURATOR has been set up following the SSoT principle and is harmonized across other COVID-19 data sources. CURATOR eliminates data silos by leveraging unique and disparate big data sources for COVID-19 research and provides a platform to capitalize on institutional investment in cloud computing. It currently hosts deeply phenotyped sociodemographic, clinical, and outcomes data of approximately 200,000 individuals tested for COVID-19. It supports more than 30 IRB-approved protocols across several clinical domains and has generated numerous publications from its core and associated data sources.

CONCLUSIONS: A data-driven decision-making strategy is paramount to the success of health care organizations. Investment in cross-disciplinary expertise, health care technology, and leadership commitment are key ingredients to foster an LHC system. Such systems can mitigate the effects of ongoing and future health care catastrophes by providing timely and validated decision support.}, } @article {pmid33537385, year = {2021}, author = {Filippucci, M and Miccolis, S and Castagnozzi, A and Cecere, G and de Lorenzo, S and Donvito, G and Falco, L and Michele, M and Nicotri, S and Romeo, A and Selvaggi, G and Tallarico, A}, title = {Seismicity of the Gargano promontory (Southern Italy) after 7 years of local seismic network operation: Data release of waveforms from 2013 to 2018.}, journal = {Data in brief}, volume = {35}, number = {}, pages = {106783}, pmid = {33537385}, issn = {2352-3409}, abstract = {The University of Bari (Italy), in cooperation with the National Institute of Geophysics and Volcanology (INGV) (Italy), has installed the OTRIONS micro-earthquake network to better understand the active tectonics of the Gargano promontory (Southern Italy). The OTRIONS network has operated since 2013 and consists of 12 short-period, three-component seismic stations located in the Apulian territory (Southern Italy). This data article releases the waveform database collected from 2013 to 2018 and describes the characteristics of the local network in the current configuration. At the end of 2018, we implemented a cloud infrastructure to make the network's acquisition and storage system more robust, through a collaboration with the RECAS-Bari computing centre of the University of Bari (Italy) and the National Institute of Nuclear Physics (Italy). Thanks to this implementation, waveforms recorded after the beginning of 2019 and the station metadata are accessible through the European Integrated Data Archive (EIDA, https://www.orfeus-eu.org/data/eida/nodes/INGV/).}, } @article {pmid33535432, year = {2021}, author = {Li, Z and Peng, E}, title = {Software-Defined Optimal Computation Task Scheduling in Vehicular Edge Networking.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {3}, pages = {}, pmid = {33535432}, issn = {1424-8220}, support = {BK20201415//Natural Science Foundation of Jiangsu Province/ ; 2017YFB1400703, 2020YFB1005503//National Key Research and Development Project/ ; U1736216, 61702233//National Natural Science Foundation of China/ ; }, abstract = {With the development of smart vehicles and various vehicular applications, the Vehicular Edge Computing (VEC) paradigm has attracted attention from academia and industry. Compared with the cloud computing platform, VEC has several new features, such as higher network bandwidth and lower transmission delay. Recently, offloading of computation-intensive vehicular tasks has become a new research field for vehicular edge computing networks. However, dynamic network topology and bursty computation task offloading cause computation load unbalancing in VEC networking. To solve this issue, this paper proposes an optimal control-based computing task scheduling algorithm. Then, we introduce a software-defined networking/OpenFlow framework to build a software-defined vehicular edge networking structure. The proposed algorithm can obtain globally optimal results and achieve load balancing by virtue of the global load status information.
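By way of contrast with the optimal-control formulation, the toy sketch below shows the simplest use of global load status information: greedily place each task on the currently least-loaded server. It is a stand-in illustration, not the authors' algorithm. For example, schedule_tasks({"t1": 3.0, "t2": 1.5, "t3": 1.0}, ["edge-1", "edge-2"]) spreads the three tasks across the two servers:

```python
import heapq

def schedule_tasks(tasks, servers):
    """Greedy least-loaded placement from global load information:
    a toy stand-in for the scheduling objective, not the paper's method."""
    heap = [(0.0, name) for name in servers]     # (current load, server)
    heapq.heapify(heap)
    placement = {}
    for task, cost in sorted(tasks.items(), key=lambda kv: -kv[1]):
        load, name = heapq.heappop(heap)         # pick least-loaded server
        placement[task] = name
        heapq.heappush(heap, (load + cost, name))
    return placement
```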
Moreover, the proposed algorithm adapts well to dynamic network environments through automatic parameter tuning. Experimental results show that the proposed algorithm can effectively improve the utilization of computation resources and meet the requirements of computation and transmission delay for various vehicular tasks.}, } @article {pmid33532168, year = {2020}, author = {Uslu, BÇ and Okay, E and Dursun, E}, title = {Analysis of factors affecting IoT-based smart hospital design.}, journal = {Journal of cloud computing (Heidelberg, Germany)}, volume = {9}, number = {1}, pages = {67}, pmid = {33532168}, issn = {2192-113X}, abstract = {Currently, rapidly developing digital technological innovations affect and change the integrated information management processes of all sectors. The high efficiency of these innovations has inevitably pushed the health sector into a digital transformation process to optimize the technologies and methodologies used to optimize healthcare management systems. In this transformation, the Internet of Things (IoT) technology plays an important role, which enables many devices to connect and work together. IoT allows systems to work together using sensors, connection methods, internet protocols, databases, cloud computing, and analytics as infrastructure. In this respect, it is necessary to establish the necessary technical infrastructure and a suitable environment for the development of smart hospitals. This study points out the optimization factors, challenges, available technologies, and opportunities, as well as the system architecture that come about by employing IoT technology in smart hospital environments. In order to do that, the required technical infrastructure is divided into five layers and the system infrastructure, constraints, and methods needed in each layer are specified, which also includes the smart hospital's dimensions and extent of intelligent computing and real-time big data analytics. As a result of the study, the deficiencies that may arise in each layer for the smart hospital design model and the factors that should be taken into account to eliminate them are explained. It is expected to provide a road map to managers, system developers, and researchers interested in optimization of the design of the smart hospital system.}, } @article {pmid33532167, year = {2020}, author = {Nguyen, V and Khanh, TT and Nguyen, TDT and Hong, CS and Huh, EN}, title = {Flexible computation offloading in a fuzzy-based mobile edge orchestrator for IoT applications.}, journal = {Journal of cloud computing (Heidelberg, Germany)}, volume = {9}, number = {1}, pages = {66}, pmid = {33532167}, issn = {2192-113X}, abstract = {In the Internet of Things (IoT) era, the capacity-limited Internet and uncontrollable service delays for various new applications, such as video streaming analysis and augmented reality, are challenges. Cloud computing systems, which offload the energy-consuming computation of IoT applications to a cloud server, cannot meet delay-sensitive and context-aware service requirements. To address this issue, an edge computing system provides timely and context-aware services by bringing the computations and storage closer to the user. Efficiently processing the dynamic flow of requests is a significant challenge for edge and cloud computing systems.
To improve the performance of IoT systems, the mobile edge orchestrator (MEO), an application placement controller, was designed by integrating end mobile devices with edge and cloud computing systems. In this paper, we propose a flexible computation offloading method in a fuzzy-based MEO for IoT applications to improve the efficiency of computational resource management. Considering the network, computation resources, and task requirements, a fuzzy-based MEO allows edge workload orchestration actions to decide whether to offload a mobile user's task to the local edge, a neighboring edge, or cloud servers. Additionally, increasing packet sizes will affect the failed-task ratio as the number of mobile devices increases. To reduce failed tasks caused by transmission collisions and to improve service times for time-critical tasks, we define a new crisp input value and a new output decision for the fuzzy-based MEO. Using the EdgeCloudSim simulator, we evaluate our proposal against four benchmark algorithms in augmented reality, healthcare, compute-intensive, and infotainment applications. Simulation results show that our proposal provides better results in terms of WLAN delay, service times, the number of failed tasks, and VM utilization.}, } @article {pmid33531307, year = {2022}, author = {Tan, H and Wang, Y and Wu, M and Huang, Z and Miao, Z}, title = {Distributed Group Coordination of Multiagent Systems in Cloud Computing Systems Using a Model-Free Adaptive Predictive Control Strategy.}, journal = {IEEE transactions on neural networks and learning systems}, volume = {33}, number = {8}, pages = {3461-3473}, doi = {10.1109/TNNLS.2021.3053016}, pmid = {33531307}, issn = {2162-2388}, abstract = {This article studies the group coordinated control problem for distributed nonlinear multiagent systems (MASs) with unknown dynamics. Cloud computing systems are employed to divide agents into groups and establish networked distributed multigroup-agent systems (ND-MGASs). To achieve the coordination of all agents and actively compensate for communication network delays, a novel networked model-free adaptive predictive control (NMFAPC) strategy combining networked predictive control theory with the model-free adaptive control method is proposed. In the NMFAPC strategy, each nonlinear agent is described as a time-varying data model, which relies only on the system measurement data for adaptive learning. To analyze the system performance, a simultaneous analysis method for stability and consensus of ND-MGASs is presented. Finally, the effectiveness and practicability of the proposed NMFAPC strategy are verified by numerical simulations and experimental examples. The achievement also provides a solution for the coordination of large-scale nonlinear MASs.}, } @article {pmid33525106, year = {2020}, author = {Su, K and Zhang, X and Liu, Q and Xiao, B}, title = {Strategies of similarity propagation in web service recommender systems.}, journal = {Mathematical biosciences and engineering : MBE}, volume = {18}, number = {1}, pages = {530-550}, doi = {10.3934/mbe.2021029}, pmid = {33525106}, issn = {1551-0018}, abstract = {Recently, web service recommender systems have attracted much attention due to the popularity of Service-Oriented Computing and Cloud Computing. Memory-based collaborative filtering approaches, which mainly rely on similarity calculation, are widely studied to realize such recommendations.
In these research works, the similarity between two users is computed based on the QoS data of their commonly invoked services, and the similarity between two services is computed based on the common users who invoked them. However, most approaches ignore the fact that similarity calculation is not always accurate when data are sparse. To address this problem, we propose a similarity propagation method to accurately evaluate the similarities between users or services. Similarity propagation means that "if A and B are similar, and B and C are similar, then A and C will be similar to some extent". Firstly, the similarity graph of users or services is constructed according to the QoS data. Then, the similarity propagation paths between two nodes on the similarity graph are discovered. Finally, the similarity along each propagation path is measured, and the indirect similarity between two users or services is evaluated by aggregating the similarities of the different paths connecting them. Comprehensive experiments on real-world datasets demonstrate that our similarity propagation method can substantially improve the QoS prediction accuracy of memory-based collaborative filtering approaches.}, } @article {pmid33525084, year = {2020}, author = {Jiang, W and Ye, X and Chen, R and Su, F and Lin, M and Ma, Y and Zhu, Y and Huang, S}, title = {Wearable on-device deep learning system for hand gesture recognition based on FPGA accelerator.}, journal = {Mathematical biosciences and engineering : MBE}, volume = {18}, number = {1}, pages = {132-153}, doi = {10.3934/mbe.2021007}, pmid = {33525084}, issn = {1551-0018}, mesh = {*Deep Learning ; Equipment Design ; Gestures ; Humans ; Neural Networks, Computer ; *Wearable Electronic Devices ; }, abstract = {Gesture recognition is critical in the field of Human-Computer Interaction, especially in healthcare, rehabilitation, sign language translation, etc. Conventionally, the gesture recognition data collected by inertial measurement unit (IMU) sensors is relayed to the cloud or a remote device with higher computing power to train models. However, this is not convenient for remote follow-up of movement rehabilitation training. In this paper, based on a field-programmable gate array (FPGA) accelerator and the Cortex-M0 IP core, we propose a wearable deep learning system that is capable of locally processing data on the end device. With a pre-stage processing module and a serial-parallel hybrid method, the device achieves low power consumption and low latency at the microcontroller unit (MCU) level while meeting or exceeding the performance of single-board computers (SBCs). For example, its performance is more than twice that of the Cortex-A53 (commonly used in the Raspberry Pi). Moreover, a convolutional neural network (CNN) and a multilayer perceptron neural network (NN) are used in the recognition model to extract features and classify gestures, which helps achieve a high recognition accuracy of 97%.
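
The propagation rule quoted in the Su et al. entry above ("if A and B are similar, and B and C are similar, then A and C will be similar to some extent") can be made concrete with a small Python sketch. The per-path product and the mean aggregation below are illustrative assumptions, not the paper's exact measures.

def path_similarity(graph, path):
    """Similarity along one propagation path = product of edge
    similarities, so longer chains yield weaker inferred similarity."""
    sim = 1.0
    for a, b in zip(path, path[1:]):
        sim *= graph[a][b]
    return sim

def propagated_similarity(graph, src, dst, max_hops=3):
    """Enumerate simple paths up to max_hops on the similarity graph
    and aggregate the per-path similarities (here: their mean)."""
    paths, stack = [], [(src, [src])]
    while stack:
        node, path = stack.pop()
        if node == dst and len(path) > 1:
            paths.append(path)
            continue
        if len(path) > max_hops:
            continue
        for nbr in graph.get(node, {}):
            if nbr not in path:
                stack.append((nbr, path + [nbr]))
    if not paths:
        return 0.0
    sims = [path_similarity(graph, p) for p in paths]
    return sum(sims) / len(sims)

# "A~B and B~C imply A~C to some extent"
g = {"A": {"B": 0.9}, "B": {"A": 0.9, "C": 0.8}, "C": {"B": 0.8}}
print(propagated_similarity(g, "A", "C"))  # 0.72 via the path A-B-C
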
Finally, this paper offers a software-hardware co-design method that can serve as a reference for designing edge devices in other scenarios.}, } @article {pmid33513299, year = {2021}, author = {Neely, BA}, title = {Cloudy with a Chance of Peptides: Accessibility, Scalability, and Reproducibility with Cloud-Hosted Environments.}, journal = {Journal of proteome research}, volume = {20}, number = {4}, pages = {2076-2082}, pmid = {33513299}, issn = {1535-3907}, support = {9999-NIST/ImNIST/Intramural NIST DOC/United States ; }, mesh = {Animals ; Computational Biology ; Peptides ; *Proteomics ; Reproducibility of Results ; *Software ; }, abstract = {Cloud-hosted environments offer known benefits when computational needs outstrip affordable local workstations, enabling high-performance computation without a physical cluster. What has been less apparent, especially to novice users, is the transformative potential for cloud-hosted environments to bridge the digital divide that exists between poorly funded and well-resourced laboratories, and to empower modern research groups with remote personnel and trainees. Using cloud-based proteomic bioinformatic pipelines is not predicated on analyzing thousands of files; instead, these pipelines can improve accessibility during remote work, extreme weather, or collaboration with under-resourced remote trainees. The general benefits of cloud-hosted environments also allow for scalability and encourage reproducibility. Since one possible hurdle to adoption is awareness, this paper is written with the nonexpert in mind. The benefits and possibilities of using a cloud-hosted environment are emphasized by describing how to set up an example workflow to analyze a previously published label-free data-dependent acquisition mass spectrometry data set of mammalian urine. Cost and time of analysis are compared using different computational tiers, and important practical considerations are described. Overall, cloud-hosted environments offer the potential to solve large computational problems, but more importantly can enable and accelerate research in smaller research groups with inadequate infrastructure and suboptimal local computational resources.}, } @article {pmid33511996, year = {2021}, author = {Alvarez, RV and Mariño-Ramírez, L and Landsman, D}, title = {Transcriptome annotation in the cloud: complexity, best practices, and cost.}, journal = {GigaScience}, volume = {10}, number = {2}, pages = {}, pmid = {33511996}, issn = {2047-217X}, mesh = {Cloud Computing ; Computational Biology ; Databases, Factual ; *Software ; *Transcriptome ; Workflow ; }, abstract = {BACKGROUND: The NIH Science and Technology Research Infrastructure for Discovery, Experimentation, and Sustainability (STRIDES) initiative provides NIH-funded researchers cost-effective access to commercial cloud providers, such as Amazon Web Services (AWS) and Google Cloud Platform (GCP). These cloud providers represent an alternative for the execution of large computational biology experiments like transcriptome annotation, which is a complex analytical process that requires the interrogation of multiple biological databases with several advanced computational tools. The core components of annotation pipelines published since 2012 are BLAST sequence alignments against annotated databases of nucleotide and protein sequences, executed almost exclusively on networked on-premises compute systems.

FINDINGS: We compare multiple BLAST sequence alignments using AWS and GCP. We prepared several Jupyter Notebooks with all the code required to submit computing jobs to the batch system of each cloud provider. We examine how the number of query transcripts per input file affects cost and processing time. We tested compute instances with 16, 32, and 64 vCPUs on each cloud provider. Four classes of timing results were collected: the total run time, the time to transfer the BLAST databases to the instance's local solid-state drive, the time to execute the CWL script, and the time for the creation, set-up, and release of an instance. This study aims to establish an estimate of the cost and compute time needed to execute multiple BLAST runs in a cloud environment.
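
A back-of-the-envelope Python sketch of the kind of cost/time estimate this study aims to establish (the throughput, hourly rate, and instance count are hypothetical placeholders tuned only to echo the scale reported in the conclusions below, roughly 500,000 transcripts in under 2 hours for about $200-$250; they are not measured values from the paper).

def estimate_blast_cost(n_transcripts, transcripts_per_hour_per_instance,
                        hourly_rate_usd, n_instances):
    """Toy cloud cost model: total work is split across parallel
    instances, and cost scales with instance-hours. All inputs
    are hypothetical assumptions for illustration."""
    hours_serial = n_transcripts / transcripts_per_hour_per_instance
    wall_clock_hours = hours_serial / n_instances
    cost = wall_clock_hours * n_instances * hourly_rate_usd
    return wall_clock_hours, cost

hours, usd = estimate_blast_cost(500_000, 2_500, 1.10, 110)
print(f"wall-clock ~ {hours:.1f} h, cost ~ ${usd:.0f}")  # ~1.8 h, ~$220
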

CONCLUSIONS: We demonstrate that public cloud providers are a practical alternative for the execution of advanced computational biology experiments at low cost. Using our cloud recipes, the BLAST alignments required to annotate a transcriptome with ∼500,000 transcripts can be processed in <2 hours with a compute cost of ∼$200-$250. In our opinion, for BLAST-based workflows, the choice of cloud platform is not dependent on the workflow but, rather, on the specific details and requirements of the cloud provider. These choices include the accessibility for institutional use, the technical knowledge required for effective use of the platform services, and the availability of open source frameworks, such as APIs, for deploying the workflow.}, } @article {pmid33511044, year = {2020}, author = {Liu, X and Kar, B and Montiel Ishino, FA and Zhang, C and Williams, F}, title = {Assessing the Reliability of Relevant Tweets and Validation Using Manual and Automatic Approaches for Flood Risk Communication.}, journal = {ISPRS international journal of geo-information}, volume = {9}, number = {9}, pages = {}, pmid = {33511044}, issn = {2220-9964}, support = {ZIA MD000015/ImNIH/Intramural NIH HHS/United States ; }, abstract = {While Twitter has been touted as a preeminent source of up-to-date information on hazard events, the reliability of tweets is still a concern. Our previous publication extracted relevant tweets containing information about the 2013 Colorado flood event and its impacts. Using these relevant tweets, this research further examined their reliability (accuracy and trueness) by examining the text and image content and comparing them to other publicly available data sources. Both manual identification of text information and automated extraction of image content (via the Google Cloud Vision application programming interface (API)) were implemented to balance accurate information verification against efficient processing time. The results showed that both the text and images contained useful information about damaged/flooded roads/streets. This information will help emergency response coordination efforts and the informed allocation of resources when enough tweets contain geocoordinates or location/venue names. This research will identify reliable crowdsourced risk information to facilitate near real-time emergency response through better use of crowdsourced risk communication platforms.}, } @article {pmid33507965, year = {2021}, author = {Gaw, LY and Richards, DR}, title = {Development of spontaneous vegetation on reclaimed land in Singapore measured by NDVI.}, journal = {PloS one}, volume = {16}, number = {1}, pages = {e0245220}, pmid = {33507965}, issn = {1932-6203}, mesh = {Cities/history ; *Ecosystem ; History, 20th Century ; History, 21st Century ; *Plants ; Singapore ; *Urbanization ; }, abstract = {Population and economic growth in Asia have led to increased urbanisation. Urbanisation has many detrimental impacts on ecosystems, especially when expansion is unplanned. Singapore is a city-state that has grown rapidly since independence, both in population and land area. However, Singapore aims to develop as a 'City in Nature', and urban greenery is integral to the landscape. While clearing some areas of forest for urban sprawl, Singapore has also reclaimed land from the sea to expand its coastline. Reclaimed land is usually designated for future urban development, but must first be left for many years to stabilise.
During the period of stabilisation, pioneer plant species establish, growing into novel forest communities. The rate of this spontaneous vegetation development has not been quantified. This study tracks the temporal trends of the normalized difference vegetation index (NDVI), a proxy for vegetation maturity, on reclaimed land sensed using LANDSAT images. Google Earth Engine was used to mosaic cloud-free annual LANDSAT images of Singapore from 1988 to 2015. Singapore's median NDVI increased by 0.15, from 0.47 to 0.62, over the study period, while its land area grew by 71 km2. Five reclaimed sites with spontaneous vegetation development showed variable vegetation cover, ranging from 6% to 43% in 2015. On average, spontaneous vegetation takes 16.9 years to develop to a maturity of 0.7 NDVI, but this development is not linear and follows a quadratic trajectory. Patches of spontaneous vegetation on isolated reclaimed lands are unlikely to remain forever, since they are in areas slated for future development. In the years that these patches exist, they have the potential to increase urban greenery, support biodiversity, and provide a host of ecosystem services. With this knowledge of spontaneous vegetation development trajectories, urban planners can harness the resource when planning future developments.}, } @article {pmid33504314, year = {2021}, author = {Raza, K and Singh, NK}, title = {A Tour of Unsupervised Deep Learning for Medical Image Analysis.}, journal = {Current medical imaging}, volume = {17}, number = {9}, pages = {1059-1077}, doi = {10.2174/1573405617666210127154257}, pmid = {33504314}, issn = {1573-4056}, mesh = {Algorithms ; *Deep Learning ; Humans ; Image Processing, Computer-Assisted ; Protein Structure, Secondary ; Unsupervised Machine Learning ; }, abstract = {BACKGROUND: Interpretation of medical images for the diagnosis and treatment of complex diseases from high-dimensional and heterogeneous data remains a key challenge in transforming healthcare. In the last few years, both supervised and unsupervised deep learning have achieved promising results in the area of medical image analysis. Several reviews of supervised deep learning have been published, but hardly any rigorous review of unsupervised deep learning for medical image analysis is available.
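
For reference, the NDVI tracked in the Gaw and Richards entry above is a simple band ratio, NDVI = (NIR - Red) / (NIR + Red). A minimal Python sketch (toy reflectance values, not LANDSAT data):

import numpy as np

def ndvi(nir, red):
    """NDVI = (NIR - Red) / (NIR + Red); values around 0.7 correspond
    to the vegetation-maturity threshold used in the study above."""
    nir, red = np.asarray(nir, float), np.asarray(red, float)
    return (nir - red) / np.clip(nir + red, 1e-9, None)  # avoid div-by-zero

# Toy reflectance pixels: bare reclaimed land vs. maturing vegetation
print(ndvi([0.30, 0.45], [0.25, 0.08]))  # approx. [0.09, 0.70]
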

OBJECTIVE: The objective of this review is to systematically present various unsupervised deep learning models, tools, and benchmark datasets applied to medical image analysis. Some of the discussed models are autoencoders and their variants, Restricted Boltzmann Machines (RBM), Deep Belief Networks (DBN), Deep Boltzmann Machine (DBM), and Generative Adversarial Network (GAN). Future research opportunities and challenges of unsupervised deep learning techniques for medical image analysis are also discussed.
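
Of the unsupervised models listed in this review, the autoencoder is the simplest to sketch. A minimal Python/PyTorch example follows (layer sizes, code dimension, and the random stand-in batch are arbitrary illustrations, not taken from the review): the encoder compresses an image to a small code, the decoder reconstructs it, and training minimizes reconstruction error without labels.

import torch
from torch import nn

class AutoEncoder(nn.Module):
    def __init__(self, n_pixels=64 * 64, code_dim=32):
        super().__init__()
        # Encoder: image -> low-dimensional code
        self.encoder = nn.Sequential(nn.Linear(n_pixels, 256), nn.ReLU(),
                                     nn.Linear(256, code_dim))
        # Decoder: code -> reconstructed image
        self.decoder = nn.Sequential(nn.Linear(code_dim, 256), nn.ReLU(),
                                     nn.Linear(256, n_pixels), nn.Sigmoid())

    def forward(self, x):
        return self.decoder(self.encoder(x))

model = AutoEncoder()
opt = torch.optim.Adam(model.parameters(), lr=1e-3)
x = torch.rand(8, 64 * 64)                  # stand-in batch of "images"
loss = nn.functional.mse_loss(model(x), x)  # unsupervised objective
loss.backward()
opt.step()                                  # one training step
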

CONCLUSION: Currently, interpretation of medical images for diagnostic purposes is usually performed by human experts, who may be replaced by computer-aided diagnosis owing to advances in machine learning techniques, including deep learning, and the availability of cheap computing infrastructure through cloud computing. Both supervised and unsupervised machine learning approaches are widely applied in medical image analysis, each with its own pros and cons. Since human supervision is not always available, adequate, or unbiased, unsupervised learning algorithms hold great promise and offer many advantages for biomedical image analysis.}, } @article {pmid33501269, year = {2020}, author = {Akbar, A and Lewis, PR and Wanner, E}, title = {A Self-Aware and Scalable Solution for Efficient Mobile-Cloud Hybrid Robotics.}, journal = {Frontiers in robotics and AI}, volume = {7}, number = {}, pages = {102}, pmid = {33501269}, issn = {2296-9144}, abstract = {Backed by the virtually unbounded resources of the cloud, battery-powered mobile robotics can also benefit from cloud computing, meeting the demands of even the most computationally and resource-intensive tasks. However, many existing mobile-cloud hybrid (MCH) robotic tasks are inefficient in terms of optimizing trade-offs between simultaneously conflicting objectives, such as minimizing both battery power consumption and network usage. To tackle this problem, we propose a novel approach that can be used not only to instrument an MCH robotic task but also to search for its efficient configurations representing compromise solutions between the objectives. We introduce a general-purpose MCH framework to measure, at runtime, how well the tasks meet these two objectives. The framework employs these efficient configurations to make decisions at runtime based on (1) changes in the environment (i.e., WiFi signal level variation) and (2) the system's own state in a changing environment (i.e., actual observed packet loss in the network). Also, we introduce a novel search-based multi-objective optimization (MOO) algorithm, which works in two steps to search for efficient configurations of MCH applications. Analysis of our results shows that: (i) using self-adaptive and self-aware decisions, an MCH foraging task performed by a battery-powered robot can achieve better optimization in a changing environment than static offloading or running the task only on the robot; however, a self-adaptive decision falls behind when the change originates within the system itself, in which case a self-aware system performs well in terms of minimizing the two objectives. (ii) The Two-Step algorithm can search for better-quality configurations for MCH robotic tasks of small to medium scale in terms of the total number of offloadable modules.}, } @article {pmid33500600, year = {2021}, author = {Sood, SK and Rawat, KS}, title = {A scientometric analysis of ICT-assisted disaster management.}, journal = {Natural hazards (Dordrecht, Netherlands)}, volume = {106}, number = {3}, pages = {2863-2881}, pmid = {33500600}, issn = {0921-030X}, abstract = {In recent years, natural and manmade disasters such as floods, earthquakes, wildfires, and tsunamis have caused human losses and environmental deterioration. Hence, to reduce the damage caused by these catastrophic events, administrations and governments need to track victims and perform synchronized, timely relief efforts at the disaster sites.
Promising information and communication technologies (ICT), such as the Internet of Things, cloud computing, and data analytics, can assist in various phases of disaster management. Moreover, the role of higher education spans all stages of disaster management: preparedness, response, and recovery. Through their educational and research contributions, higher education institutes are involved in all these stages and thereby serve society broadly. A scientometric analysis of the disaster management literature is therefore needed to characterize the overall structure of, and developments in, this domain. This study presents a scientometric analysis that evaluates ICT-assisted disaster management research over the last 15 years (2005-2020). It presents various empirical ways to analyze the evolution, status, and results of ICT-assisted disaster management research. This study provides extensive insight into publication growth, citation analysis, collaboration, and keyword co-occurrence analysis for technological trends in ICT-assisted disaster management research. It identifies key journals, countries, and organizations that have contributed significantly to this research domain. Overall, this study presents various patterns, research trends, and collaborations as the basic structure for future research in this field.}, } @article {pmid33498910, year = {2021}, author = {Chen, S and Li, Q and Zhou, M and Abusorrah, A}, title = {Recent Advances in Collaborative Scheduling of Computing Tasks in an Edge Computing Paradigm.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {3}, pages = {}, pmid = {33498910}, issn = {1424-8220}, abstract = {In edge computing, edge devices can offload their overloaded computing tasks to an edge server. This gives full play to an edge server's advantages in computing and storage and allows computing tasks to be executed efficiently. However, if all devices offload their excess computing tasks to a single edge server, the server itself can become overloaded, resulting in high processing delays for many computing tasks and unexpectedly high energy consumption. On the other hand, the resources in idle edge devices may be wasted and resource-rich cloud centers may be underutilized. Therefore, it is essential to explore a collaborative task scheduling mechanism spanning an edge server, a cloud center, and edge devices, driven by task characteristics, optimization objectives, and system status. Such a mechanism can help realize efficient collaborative scheduling and precise execution of all computing tasks. This work analyzes and summarizes the scenarios found in the edge computing paradigm. It then classifies the computing tasks in edge computing scenarios. Next, it formulates the optimization problem of computation offloading for an edge computing system. According to the problem formulation, the collaborative scheduling methods of computing tasks are then reviewed.
Finally, future research issues for advanced collaborative scheduling in the context of edge computing are indicated.}, } @article {pmid33478558, year = {2021}, author = {Sun, S and Xie, Z and Yu, K and Jiang, B and Zheng, S and Pan, X}, title = {COVID-19 and healthcare system in China: challenges and progression for a sustainable future.}, journal = {Globalization and health}, volume = {17}, number = {1}, pages = {14}, pmid = {33478558}, issn = {1744-8603}, support = {201810343007//National College Students Innovation and Entrepreneurship Training Program/International ; S20190024//Wenzhou Municipal Science and Technology Bureau/International ; }, mesh = {Artificial Intelligence ; COVID-19/*epidemiology/*prevention & control ; China/epidemiology ; Disaster Planning/*organization & administration ; Disease Outbreaks/*prevention & control/statistics & numerical data ; Government ; Health Personnel/organization & administration ; Humans ; }, abstract = {With the ongoing COVID-19 outbreak, healthcare systems across the world have been pushed to the brink. The approach of traditional healthcare systems to disaster preparedness and prevention has demonstrated intrinsic problems, such as failure to detect the spread of the virus early, public hospitals being overwhelmed, a dire shortage of personal protective equipment, and exhaustion of healthcare workers. Consequently, this situation incurred heavy manpower and resource costs and contributed to the widespread, exponential rise of infected cases in the early stage of the epidemic. To limit the spread of infection, the Chinese government adopted innovative, specialized, and advanced systems, including empowered Fangcang and Internet hospitals, as well as high technologies such as 5G, big data analysis, cloud computing, and artificial intelligence. The efficient use of these new forces helped China win its fight against the virus. As the rampant spread of the virus continues outside China, these new forces need to be integrated into the global healthcare system to combat the disease. A global healthcare system integrated with these new forces is essential not only for COVID-19 but also for unknown infections in the future.}, } @article {pmid33477963, year = {2021}, author = {Pan, SH and Wang, SC}, title = {Optimal Consensus with Dual Abnormality Mode of Cellular IoT Based on Edge Computing.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {2}, pages = {}, pmid = {33477963}, issn = {1424-8220}, abstract = {The continuous development of fifth-generation (5G) networks is the main driving force for the growth of Internet of Things (IoT) applications. The 5G network is expected to greatly expand the applications of the IoT, thereby promoting the operation of cellular networks, intensifying the security and networking challenges of the IoT, and pushing the future of the Internet to the edge. Because the IoT can connect anything, anywhere, at any time, it can provide ubiquitous services. With the establishment and use of 5G wireless networks, the cellular IoT (CIoT) will be developed and applied. To provide more reliable CIoT applications, a reliable network topology is very important. Reaching a consensus is one of the most important issues in achieving a highly reliable CIoT design. Therefore, it is necessary to reach a consensus so that even if some components in the system are abnormal, applications in the system can still execute correctly in the CIoT.
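
A toy Python sketch of the majority-voting primitive that consensus designs of this kind build on, covering the dual abnormality mode named in the title above (dormant components send nothing; malicious ones send wrong values). This is vastly simplified to a single exchange round; the protocol's actual guarantees require more machinery.

from collections import Counter

def majority_vote(received):
    """One voting step: ignore dormant components (None = no message)
    and take the majority over the values that did arrive. Real
    protocols need multiple exchange rounds to mask malicious values;
    this only illustrates the voting primitive."""
    votes = [v for v in received.values() if v is not None]
    value, n = Counter(votes).most_common(1)[0]
    return value if n > len(votes) / 2 else None  # None = no majority

# 5 components: one dormant (None), one malicious (wrong value 1)
msgs = {"c1": 0, "c2": 0, "c3": None, "c4": 1, "c5": 0}
print(majority_vote(msgs))  # 0: the normal components still agree
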
In this study, a consensus protocol is proposed for the CIoT under a dual abnormality mode that combines dormant and malicious abnormalities. The protocol proposed in this research not only allows all normal components in the CIoT to reach a consensus with the minimum number of data exchanges, but also tolerates the maximum number of dormant and malicious abnormal components. At the same time, the protocol ensures that all normal components in the CIoT satisfy the constraints of reaching consensus: Termination, Agreement, and Integrity.}, } @article {pmid33466730, year = {2021}, author = {Farid, F and Elkhodr, M and Sabrina, F and Ahamed, F and Gide, E}, title = {A Smart Biometric Identity Management Framework for Personalised IoT and Cloud Computing-Based Healthcare Services.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {2}, pages = {}, pmid = {33466730}, issn = {1424-8220}, mesh = {Biometry ; *Cloud Computing ; Computer Security ; Delivery of Health Care ; Humans ; *Internet of Things ; }, abstract = {This paper proposes a novel identity management framework for Internet of Things (IoT) and cloud computing-based personalized healthcare systems. The proposed framework uses multimodal encrypted biometric traits to perform authentication. It employs a combination of centralized and federated identity access techniques along with biometric-based continuous authentication. The framework uses a fusion of electrocardiogram (ECG) and photoplethysmogram (PPG) signals when performing authentication. In addition to relying on the unique identification characteristics of the users' biometric traits, the security of the framework is strengthened by the use of Homomorphic Encryption (HE). The use of HE allows patients' data to stay encrypted when being processed or analyzed in the cloud, providing not only a fast and reliable authentication mechanism but also closing the door to many traditional security attacks. The framework's performance was evaluated and validated using a machine learning (ML) model tested on a dataset of 25 users in seated positions. Compared to using just ECG or PPG signals, the proposed fusion-based biometric framework successfully identified and authenticated all 25 users with 100% accuracy, offering a significant improvement to the overall security and privacy of personalized healthcare systems.}, } @article {pmid33466338, year = {2021}, author = {Raghavan, A and Demircioglu, MA and Taeihagh, A}, title = {Public Health Innovation through Cloud Adoption: A Comparative Analysis of Drivers and Barriers in Japan, South Korea, and Singapore.}, journal = {International journal of environmental research and public health}, volume = {18}, number = {1}, pages = {}, pmid = {33466338}, issn = {1660-4601}, mesh = {*Cloud Computing ; Delivery of Health Care/*methods ; *Government ; Humans ; Japan ; *Public Health ; Republic of Korea ; Singapore ; }, abstract = {Governments are increasingly using cloud computing to reduce cost, increase access, improve quality, and create innovations in healthcare. The existing literature is primarily based on successful examples from developed western countries, and there is a lack of similar evidence from Asia. With a population close to 4.5 billion people, Asia faces healthcare challenges that pose an immense burden on economic growth and policymaking.
Cloud computing in healthcare can potentially help increase the quality of healthcare delivery and reduce the economic burden, enabling governments to address healthcare challenges effectively and within a short timeframe. Advanced Asian countries such as Japan, South Korea, and Singapore provide successful examples of how cloud computing can be used to develop nationwide databases of electronic health records; real-time health monitoring for the elderly population; genetic databases to support advanced research and cancer treatment; telemedicine; and health cities that drive the economy through the medical industry, tourism, and research. This article examines these countries, identifies the drivers of and barriers to cloud adoption in healthcare, and makes policy recommendations to enable successful public health innovations through cloud adoption.}, } @article {pmid33465776, year = {2021}, author = {Anselmo, C and Attili, M and Horton, R and Kappe, B and Schulman, J and Baird, P}, title = {Hey You, Get On the Cloud: Safe and Compliant Use of Cloud Computing with Medical Devices.}, journal = {Biomedical instrumentation & technology}, volume = {55}, number = {1}, pages = {1-15}, pmid = {33465776}, issn = {0899-8205}, mesh = {*Cloud Computing ; *Internet ; }, } @article {pmid33456318, year = {2021}, author = {Patel, YS and Malwi, Z and Nighojkar, A and Misra, R}, title = {Truthful online double auction based dynamic resource provisioning for multi-objective trade-offs in IaaS clouds.}, journal = {Cluster computing}, volume = {24}, number = {3}, pages = {1855-1879}, pmid = {33456318}, issn = {1386-7857}, abstract = {Auction designs have recently been adopted for static and dynamic resource provisioning in IaaS clouds, such as Microsoft Azure and Amazon EC2. However, the existing mechanisms are mostly restricted to simple auctions, single objectives, offline settings, and one-sided interactions among either cloud users or cloud service providers (CSPs), and are vulnerable to misreports of cloud users' private information. This paper considers a more realistic online auction scenario for IaaS clouds that captures elasticity under the time-varying arrival of cloud user requests and time-based server maintenance in cloud data centers. We propose an online truthful double auction technique for balancing the multi-objective trade-offs between energy, revenue, and performance in IaaS clouds, consisting of a weighted bipartite matching-based winning-bid determination algorithm for resource allocation and a Vickrey-Clarke-Groves (VCG)-driven algorithm for calculating the payments of winning bids.
Through rigorous theoretical analysis and extensive trace-driven simulation studies exploiting Google cluster workload traces, we demonstrate that our mechanism significantly improves performance while guaranteeing truthfulness, heterogeneity, economic efficiency, and individual rationality, with polynomial-time computational complexity.}, } @article {pmid33451105, year = {2021}, author = {Lee, YL and Arizky, SN and Chen, YR and Liang, D and Wang, WJ}, title = {High-Availability Computing Platform with Sensor Fault Resilience.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {2}, pages = {}, pmid = {33451105}, issn = {1424-8220}, support = {Next Generation IOT key Technologies and Application Systems In-depth Development Project (2/4)//Institute for Information Industry, Taiwan/ ; 108-2221-E- 008 -032 -MY3//Ministry of Science and Technology, Taiwan/ ; }, abstract = {Modern computing platforms usually use multiple sensors to report system information. To achieve high availability (HA) for the platform, the sensors can be used to efficiently detect system faults that render a cloud service unavailable. However, a sensor may fail and disable HA protection. In this case, human intervention is needed, either to change the original fault model or to fix the sensor fault. Therefore, this study proposes an HA mechanism that can continuously provide HA to a cloud system based on dynamic fault model reconstruction. We have implemented the proposed HA mechanism on a four-layer OpenStack cloud system and tested its performance for all possible sets of sensor faults. For each fault model, we injected possible system faults and measured the average fault detection time. The experimental results show that the proposed mechanism can accurately detect and recover from an injected system fault even with disabled sensors. In addition, the system fault detection time increases as the number of sensor faults increases, until the HA mechanism degrades to a one-system-fault model, the worst case, which is equivalent to system-layer heartbeating alone.}, } @article {pmid33451012, year = {2021}, author = {Lozano Domínguez, JM and Mateo Sanguino, TJ}, title = {Walking Secure: Safe Routing Planning Algorithm and Pedestrian's Crossing Intention Detector Based on Fuzzy Logic App.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {2}, pages = {}, pmid = {33451012}, issn = {1424-8220}, support = {"Industrialization of a Road Signaling Autonomous System for Smart Pedestrian Crosswalks" (ref. 5947)//Ministry of Economy and Knowledge of the Andalusian Government, Spain/ ; "Improvement of Road Safety Through an Intelligent Service Platform for Pedestrians, Sensors and Environment" (ref. UHU-1260596)//Ministry of Economy and Knowledge of the Andalusian Government, Spain/ ; }, abstract = {Improving road safety through artificial intelligence is now crucial to achieving more secure smart cities. With this objective, we present a mobile app that integrates smartphone sensors with a fuzzy logic strategy to determine a pedestrian's crossing intention around crosswalks. The app also calculates, traces, and guides users along safe routes, thanks to an optimization algorithm that incorporates pedestrian areas (i.e., zebra crossings, pedestrian streets, and walkways), stored in a cloud database, into the paths generated across the whole city.
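
A toy fuzzy-logic sketch in Python, in the spirit of the crossing-intention detector just described (the membership shapes, input variables, and the min-AND rule are illustrative assumptions, not the published rule base):

def tri(x, a, b, c):
    """Triangular membership function peaking at b."""
    if x <= a or x >= c:
        return 0.0
    return (x - a) / (b - a) if x <= b else (c - x) / (c - b)

def crossing_intention(speed_ms, dist_to_crosswalk_m, heading_deg):
    """Toy fuzzy rule: intention is high when the pedestrian is slow,
    close to the crosswalk, and facing it (0 deg = toward crossing)."""
    slow = tri(speed_ms, -0.5, 0.0, 1.5)
    near = tri(dist_to_crosswalk_m, 0.0, 1.0, 6.0)
    facing = tri(heading_deg, -45.0, 0.0, 45.0)
    return min(slow, near, facing)  # rule strength in [0, 1]

print(crossing_intention(0.4, 1.5, 10.0))  # high crossing intention
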
The experiments consisted of testing the fuzzy logic strategy with 31 volunteers crossing and walking around a crosswalk; the approach was evaluated on a total of 3120 samples generated by the volunteers. The results show that a smartphone can be successfully used as a crossing-intention detector with an accuracy of 98.63%, a true positive rate of 98.27%, and a specificity of 99.39% according to a receiver operating characteristic analysis. Finally, a total of 30 routes were calculated by the proposed algorithm and compared with Google Maps, considering the time, distance, and safety of the routes. As a result, the routes generated by the proposed algorithm were safer than the routes obtained with Google Maps, achieving an increase in the use of safe pedestrian areas of at least 183%.}, } @article {pmid33433860, year = {2021}, author = {Singh, K and Malhotra, J}, title = {Cloud based ensemble machine learning approach for smart detection of epileptic seizures using higher order spectral analysis.}, journal = {Physical and engineering sciences in medicine}, volume = {44}, number = {1}, pages = {313-324}, pmid = {33433860}, issn = {2662-4737}, mesh = {*Cloud Computing ; Electroencephalography ; *Epilepsy/diagnosis ; Humans ; Machine Learning ; Seizures ; }, abstract = {The present paper proposes a smart framework for the detection of epileptic seizures using the concepts of IoT technologies, cloud computing, and machine learning. The framework processes the acquired scalp EEG signals with the fast Walsh-Hadamard transform. The transformed frequency-domain signals are then examined using higher-order spectral analysis to extract amplitude and entropy-based statistical features. The extracted features are selected by means of a correlation-based feature selection algorithm to achieve more real-time classification with reduced complexity and delay. Finally, the samples containing the selected features are fed to ensemble machine learning techniques for classification into several classes of EEG states, viz. normal, interictal, and ictal. The employed techniques include the Dagging, Bagging, Stacking, MultiBoost AB, and AdaBoost M1 algorithms in integration with the C4.5 decision tree algorithm as the base classifier. The results of the ensemble techniques are also compared with standalone C4.5 decision tree and SVM algorithms. The performance analysis through simulation results reveals that the ensemble of AdaBoost M1 and C4.5 decision tree algorithms with higher-order spectral features is an adequate technique for the automated detection of epileptic seizures in real time. This technique achieves 100% classification accuracy, sensitivity, and specificity with optimally small classification time.}, } @article {pmid33430386, year = {2021}, author = {Li, D and Xu, S and Li, P}, title = {Deep Reinforcement Learning-Empowered Resource Allocation for Mobile Edge Computing in Cellular V2X Networks.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {2}, pages = {}, pmid = {33430386}, issn = {1424-8220}, support = {61571038//National Natural Science Foundation of China/ ; 61931001//National Natural Science Foundation of China/ ; 2020D06//National Mobile Communications Research Laboratory, Southeast University/ ; }, abstract = {With the rapid development of vehicular networks, vehicle-to-everything (V2X) communications generate a huge number of computation tasks, which challenges the scarce network resources.
Cloud servers can compensate for the limited computing capabilities of vehicular user equipment (VUE), but limited resources, the dynamic vehicular environment, and the long distances between cloud servers and VUE introduce issues such as extra communication delay and energy consumption. Fortunately, mobile edge computing (MEC), a promising computing paradigm, can ameliorate the above problems by enhancing the computing abilities of VUE through the allocation of computational resources to VUE. In this paper, we propose a joint optimization algorithm based on a deep reinforcement learning algorithm, the double deep Q network (double DQN), to minimize a cost composed of energy consumption and the latency of computation and communication under a proper policy. The proposed algorithm is well suited to dynamic, low-latency vehicular scenarios in the real world. Compared with other reinforcement learning algorithms, the proposed algorithm improves performance in terms of convergence, defined cost, and speed by around 30%, 15%, and 17%, respectively.}, } @article {pmid33426574, year = {2021}, author = {Santos, JA and Inácio, PRM and Silva, BMC}, title = {Towards the Use of Blockchain in Mobile Health Services and Applications.}, journal = {Journal of medical systems}, volume = {45}, number = {2}, pages = {17}, pmid = {33426574}, issn = {1573-689X}, mesh = {*Blockchain ; Computer Security ; Electronic Health Records ; Health Services ; Humans ; *Telemedicine ; }, abstract = {With the advent of cryptocurrencies and blockchain, the growth and adaptation of cryptographic features and capabilities were quickly extended to new and underexplored areas, such as healthcare. Currently, blockchain is being implemented mainly as a mechanism to secure Electronic Health Records (EHRs). However, new studies have shown that this technology can be a powerful tool in empowering patients to control their own health data, as well as for enabling a fool-proof health data history and establishing medical responsibility. Additionally, with the proliferation of mobile health (m-Health) sustained on service-oriented architectures, the adaptation of blockchain mechanisms into m-Health applications creates the possibility of a more decentralized and available healthcare service. Hence, this paper presents a review of the current security best practices for m-Health and the most used and widely known implementations of the blockchain protocol, including blockchain technologies in m-Health. The main goal of this comprehensive review is to further discuss and elaborate on identified open issues and potential use cases for blockchain in this area. Finally, the paper presents the major findings, challenges, and advantages of future blockchain implementations for m-Health services and applications.}, } @article {pmid33422469, year = {2021}, author = {Mennen, AC and Turk-Browne, NB and Wallace, G and Seok, D and Jaganjac, A and Stock, J and deBettencourt, MT and Cohen, JD and Norman, KA and Sheline, YI}, title = {Cloud-Based Functional Magnetic Resonance Imaging Neurofeedback to Reduce the Negative Attentional Bias in Depression: A Proof-of-Concept Study.}, journal = {Biological psychiatry.
Cognitive neuroscience and neuroimaging}, volume = {6}, number = {4}, pages = {490-497}, pmid = {33422469}, issn = {2451-9030}, support = {S10 OD023495/OD/NIH HHS/United States ; T32 MH065214/MH/NIMH NIH HHS/United States ; UL1 TR001863/TR/NCATS NIH HHS/United States ; }, mesh = {*Attentional Bias ; Cloud Computing ; Depression ; *Depressive Disorder, Major/therapy ; Humans ; Magnetic Resonance Imaging ; *Neurofeedback ; }, abstract = {Individuals with depression show an attentional bias toward negatively valenced stimuli and thoughts. In this proof-of-concept study, we present a novel closed-loop neurofeedback procedure intended to remediate this bias. Internal attentional states were detected in real time by applying machine learning techniques to functional magnetic resonance imaging data on a cloud server; these attentional states were externalized using a visual stimulus that the participant could learn to control. We trained 15 participants with major depressive disorder and 12 healthy control participants over 3 functional magnetic resonance imaging sessions. Exploratory analysis showed that participants with major depressive disorder were initially more likely than healthy control participants to get stuck in negative attentional states, but this diminished with neurofeedback training relative to controls. Depression severity also decreased from pre- to posttraining. These results demonstrate that our method is sensitive to the negative attentional bias in major depressive disorder and showcase the potential of this novel technique as a treatment that can be evaluated in future clinical trials.}, } @article {pmid33417125, year = {2021}, author = {Nowakowski, K and Carvalho, P and Six, JB and Maillet, Y and Nguyen, AT and Seghiri, I and M'Pemba, L and Marcille, T and Ngo, ST and Dao, TT}, title = {Human locomotion with reinforcement learning using bioinspired reward reshaping strategies.}, journal = {Medical & biological engineering & computing}, volume = {59}, number = {1}, pages = {243-256}, pmid = {33417125}, issn = {1741-0444}, mesh = {Adult ; *Artificial Intelligence ; Humans ; Learning ; Locomotion ; *Reinforcement, Psychology ; Reward ; }, abstract = {Recent learning strategies such as reinforcement learning (RL) have favored the transition from applied artificial intelligence to general artificial intelligence. One of the current challenges of RL in healthcare relates to the development of a controller to teach a musculoskeletal model to perform dynamic movements. Several solutions have been proposed. However, there is still a lack of investigations exploring the muscle control problem from a biomechanical point of view. Moreover, no studies using biological knowledge to develop plausible motor control models for pathophysiological conditions make use of reward reshaping. Consequently, the objective of the present work was to design and evaluate specific bioinspired reward function strategies for human locomotion learning within an RL framework. The deep deterministic policy gradient (DDPG) method for a single-agent RL problem was applied. A 3D musculoskeletal model (8 DoF and 22 muscles) of a healthy adult was used. A virtual interactive environment was developed and simulated using the opensim-rl library. Three reward functions were defined for walking, forward falls, and side falls. The training process was performed with Google Cloud Compute Engine. The obtained outcomes were compared to the NIPS 2017 challenge outcomes, experimental observations, and literature data.
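
A minimal Python sketch of a bioinspired shaped reward of the kind just described for the walking task (the weights, the 0.7 m pelvis-height threshold, and the effort term are illustrative assumptions, not the paper's exact functions):

def walking_reward(pelvis_height_m, forward_velocity_ms, muscle_activations):
    """Toy shaped reward: reward forward progress, penalise muscular
    effort, and apply a large terminal penalty on falling (pelvis too
    low). All constants are hypothetical."""
    if pelvis_height_m < 0.7:                  # fall detected
        return -10.0
    effort = sum(a * a for a in muscle_activations)
    return 2.0 * forward_velocity_ms - 0.05 * effort

# One timestep for an upright model walking at ~1.2 m/s,
# with 22 muscle activations as in the musculoskeletal model above
print(walking_reward(0.95, 1.2, [0.3] * 22))
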
Regarding learning to walk, the simulated musculoskeletal models walked 18 to 20.5 m for the best solutions. A compensation strategy of muscle activations was revealed. The soleus, tibialis anterior, and vastii muscles are the main actors in the simple forward fall. A higher intensity of muscle activations was also noted after the fall. All kinematics and muscle patterns were consistent with experimental observations and literature data. Regarding the side fall, intense muscle activation on the expected fall side, used to unbalance the body, was noted. The obtained outcomes suggest that computational and human resources as well as biomechanical knowledge are needed together to develop and evaluate an efficient and robust RL solution. As future perspectives, the current solutions will be extended to a larger parameter space in 3D. Furthermore, a stochastic reinforcement learning model will be investigated, in view of the uncertainties of the musculoskeletal model and its associated environment, to provide a general artificial intelligence solution for human locomotion learning.}, } @article {pmid33414916, year = {2021}, author = {Chen, Y and Yan, W and Xie, Z and Guo, W and Lu, D and Lv, Z and Zhang, X}, title = {Comparative analysis of target gene exon sequencing by cognitive technology using a next generation sequencing platform in patients with lung cancer.}, journal = {Molecular and clinical oncology}, volume = {14}, number = {2}, pages = {36}, pmid = {33414916}, issn = {2049-9450}, abstract = {Next generation sequencing (NGS) technology is an increasingly important clinical tool for therapeutic decision-making. However, interpretation of NGS data presents challenges at the point of care, due to limitations in understanding the clinical importance of gene variants and in efficiently translating results into actionable information for the clinician. The present study compared two approaches for annotating and reporting actionable genes and gene mutations from tumor samples: the traditional approach of manual curation, annotation, and reporting by an experienced molecular tumor bioinformatician; and a cloud-based cognitive technology, with the goal of detecting gene mutations of potential significance in Chinese patients with lung cancer. Data from 285-gene targeted exon sequencing, previously conducted on 115 patient tissue samples between 2014 and 2016 and subsequently manually annotated and evaluated by the Guangdong Lung Cancer Institute (GLCI) research team, were analyzed with the Watson for Genomics (WfG) cognitive genomics technology. A comparative analysis of the annotation results of the two methods was conducted to identify quantitative and qualitative differences in the mutations generated. The complete congruence rate of annotation results between WfG analysis and the GLCI bioinformatician was 43.48%. In 65 (56.52%) samples, WfG analysis identified and interpreted, on average, 1.54 more mutation sites per sample than the manual GLCI review. These mutation sites were located in 27 genes, including EP300, ARID1A, STK11 and DNMT3A. Mutations in the EP300 gene were the most prevalent, present in 30.77% of samples. The Tumor Mutation Burden (TMB) interpreted by WfG analysis (1.82) was significantly higher than that interpreted by GLCI review (0.73). Compared with manual curation by a bioinformatician, WfG analysis provided comprehensive insights and additional genetic alterations to inform clinical therapeutic strategies for patients with lung cancer.
These findings suggest a valuable role for cognitive computing in increasing the efficiency and comprehensiveness of detecting and interpreting genetic alterations, which may inform opportunities for targeted cancer therapies.}, } @article {pmid33411624, year = {2021}, author = {Rajendran, S and Obeid, JS and Binol, H and D Agostino, R and Foley, K and Zhang, W and Austin, P and Brakefield, J and Gurcan, MN and Topaloglu, U}, title = {Cloud-Based Federated Learning Implementation Across Medical Centers.}, journal = {JCO clinical cancer informatics}, volume = {5}, number = {}, pages = {1-11}, pmid = {33411624}, issn = {2473-4276}, support = {P30 CA012197/CA/NCI NIH HHS/United States ; P30 DK123704/DK/NIDDK NIH HHS/United States ; UL1 TR001420/TR/NCATS NIH HHS/United States ; UL1 TR001450/TR/NCATS NIH HHS/United States ; }, mesh = {*Cloud Computing ; Humans ; *Information Dissemination ; Machine Learning ; Neural Networks, Computer ; *Privacy ; }, abstract = {PURPOSE: Building well-performing machine learning (ML) models in health care has always been challenging because of data-sharing concerns, yet ML approaches often require larger training samples than any one institution can provide. This paper explores several federated learning implementations by applying them in both a simulated environment and an actual implementation using electronic health record data from two academic medical centers on a Microsoft Azure Cloud Databricks platform.

MATERIALS AND METHODS: Using two separate cloud tenants, ML models were created, trained, and exchanged from one institution to another via a GitHub repository. Federated learning processes were applied to both artificial neural network (ANN) and logistic regression (LR) models on horizontal data sets varying in count and availability. Incremental and cyclic federated learning models were tested in both simulated and real environments.
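
A minimal Python simulation of the cyclic hand-off described here: the model is trained at one site, then passed to the next, so raw data never leaves an institution. The synthetic data and the logistic-regression stand-in are assumptions for illustration (in the study, models were exchanged via a repository rather than in memory).

import numpy as np

def local_train(weights, X, y, lr=0.1, epochs=20):
    """Logistic-regression gradient steps on one institution's data."""
    w = weights.copy()
    for _ in range(epochs):
        p = 1.0 / (1.0 + np.exp(-X @ w))
        w -= lr * X.T @ (p - y) / len(y)
    return w

def cyclic_federated(institutions, dim, rounds=3):
    """Cyclic federated learning: visit sites in a fixed order each
    round, handing the current weights from one site to the next."""
    w = np.zeros(dim)
    for _ in range(rounds):
        for X, y in institutions:
            w = local_train(w, X, y)
    return w

rng = np.random.default_rng(0)
sites = [(rng.normal(size=(100, 5)), rng.integers(0, 2, 100).astype(float))
         for _ in range(2)]          # two medical centers, synthetic data
print(cyclic_federated(sites, dim=5))
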

RESULTS: The cyclically trained ANN showed a 3% increase in performance, a significant improvement across most attempts (P < .05). Single weight neural network models showed improvement in some cases. However, LR models did not show much improvement after federated learning processes. The specific process that improved the performance differed based on the ML model and how federated learning was implemented. Moreover, we have confirmed that the order of the institutions during the training did influence the overall performance increase.

CONCLUSION: Unlike previous studies, our work has shown the implementation and effectiveness of federated learning processes beyond simulation. Additionally, we have identified different federated learning models that have achieved statistically significant performances. More work is needed to achieve effective federated learning processes in biomedicine, while preserving the security and privacy of the data.}, } @article {pmid33411623, year = {2021}, author = {Jones, DE and Alimi, TO and Pordell, P and Tangka, FK and Blumenthal, W and Jones, SF and Rogers, JD and Benard, VB and Richardson, LC}, title = {Pursuing Data Modernization in Cancer Surveillance by Developing a Cloud-Based Computing Platform: Real-Time Cancer Case Collection.}, journal = {JCO clinical cancer informatics}, volume = {5}, number = {}, pages = {24-29}, pmid = {33411623}, issn = {2473-4276}, mesh = {Automation ; Centers for Disease Control and Prevention, U.S. ; *Cloud Computing ; Computer Systems ; Data Collection/*methods ; Data Management/*methods ; Epidemiological Monitoring ; Health Policy ; Humans ; Neoplasms/*epidemiology ; Registries ; United States ; }, abstract = {Cancer surveillance is a field focused on collection of data to evaluate the burden of cancer and apply public health strategies to prevent and control cancer in the community. A key challenge facing the cancer surveillance community is the number of manual tasks required to collect cancer surveillance data, thereby resulting in possible delays in analysis and use of the information. To modernize and automate cancer data collection and reporting, the Centers for Disease Control and Prevention is planning, developing, and piloting a cancer surveillance cloud-based computing platform (CS-CBCP) with standardized electronic reporting from laboratories and health-care providers. With this system, automation of the cancer case collection process and access to real-time cancer case data can be achieved, which could not be done before. Furthermore, the COVID-19 pandemic has illustrated the importance of continuity of operations plans, and the CS-CBCP has the potential to provide such a platform suitable for remote operations of central cancer registries.}, } @article {pmid33409205, year = {2020}, author = {Chattopadhyay, T and Mondal, H and Mondal, S and Dutta, R and Saha, K and Das, D}, title = {Prescription digitization, online preservation, and retrieval on a smartphone.}, journal = {Journal of family medicine and primary care}, volume = {9}, number = {10}, pages = {5295-5302}, pmid = {33409205}, issn = {2249-4863}, abstract = {BACKGROUND: Medical records are important documents that should be stored for at least 3 years after the commencement of the treatment of an adult patient in India. In a health care facility, patients' data is saved in an online or offline retrieval system. However, in the case of the primary care physician, the data is not commonly kept in an easily retrievable system.

AIM: To test the feasibility of using a set of free web-based services for the digitization, preservation, and retrieval of prescriptions on a smartphone by primary care physicians.

METHODS: This study was conducted with 12 primary care physicians. They were provided with hands-on guides on creating an online form for uploading a prescription and on using an application to retrieve the prescription on a smartphone. Their feedback on the training material was collected by a telephonic survey with a 10-point Likert-type response option. An in-depth interview was then conducted to ascertain their perceptions of the tutorial and of the digitization and retrieval process.

RESULTS: All of the participants were able to create an online form on their smartphone. They uploaded their prescriptions and associated data and were able to retrieve them. The physicians commented positively on the "cost of the system," its "portability" on a smartphone, and the ease of the "tutorial". They commented negatively on the "limited storage," the chance of "loss of data," and the "time constraints" of entering patients' data.

CONCLUSION: Free web-based and smartphone applications can be used by a primary care physician for personal storage and retrieval of prescriptions. The simple tutorial presented in this article would help many primary care physicians in resource-limited settings.}, } @article {pmid33408373, year = {2021}, author = {Feldmann, J and Youngblood, N and Karpov, M and Gehring, H and Li, X and Stappers, M and Le Gallo, M and Fu, X and Lukashchuk, A and Raja, AS and Liu, J and Wright, CD and Sebastian, A and Kippenberg, TJ and Pernice, WHP and Bhaskaran, H}, title = {Parallel convolutional processing using an integrated photonic tensor core.}, journal = {Nature}, volume = {589}, number = {7840}, pages = {52-58}, pmid = {33408373}, issn = {1476-4687}, support = {/ERC_/European Research Council/International ; }, abstract = {With the proliferation of ultrahigh-speed mobile networks and internet-connected devices, along with the rise of artificial intelligence (AI)[1], the world is generating exponentially increasing amounts of data that need to be processed in a fast and efficient way. Highly parallelized, fast and scalable hardware is therefore becoming progressively more important[2]. Here we demonstrate a computationally specific integrated photonic hardware accelerator (tensor core) that is capable of operating at speeds of trillions of multiply-accumulate operations per second (10[12] MAC operations per second or tera-MACs per second). The tensor core can be considered as the optical analogue of an application-specific integrated circuit (ASIC). It achieves parallelized photonic in-memory computing using phase-change-material memory arrays and photonic chip-based optical frequency combs (soliton microcombs[3]). The computation is reduced to measuring the optical transmission of reconfigurable and non-resonant passive components and can operate at a bandwidth exceeding 14 gigahertz, limited only by the speed of the modulators and photodetectors. Given recent advances in hybrid integration of soliton microcombs at microwave line rates[3-5], ultralow-loss silicon nitride waveguides[6,7], and high-speed on-chip detectors and modulators, our approach provides a path towards full complementary metal-oxide-semiconductor (CMOS) wafer-scale integration of the photonic tensor core. Although we focus on convolutional processing, more generally our results indicate the potential of integrated photonics for parallel, fast, and efficient computational hardware in data-heavy AI applications such as autonomous driving, live video processing, and next-generation cloud computing services.}, } @article {pmid33407445, year = {2021}, author = {Bertuccio, S and Tardiolo, G and Giambò, FM and Giuffrè, G and Muratore, R and Settimo, C and Raffa, A and Rigano, S and Bramanti, A and Muscarà, N and De Cola, MC}, title = {ReportFlow: an application for EEG visualization and reporting using cloud platform.}, journal = {BMC medical informatics and decision making}, volume = {21}, number = {1}, pages = {7}, pmid = {33407445}, issn = {1472-6947}, mesh = {*Cloud Computing ; Computer Security ; Electroencephalography ; *Electronic Health Records ; Humans ; Information Dissemination ; }, abstract = {BACKGROUND: The cloud is a promising resource for data sharing and computing. It can optimize several legacy processes involving different units of a company or more companies. 
Recently, cloud technology applications have been spreading in the healthcare setting as well, making it possible to cut the costs of physical infrastructure and staff travel. In a public environment, the main challenge is to guarantee the protection of patients' data. We describe a cloud-based system, named ReportFlow, developed to improve the process of reporting and delivering electroencephalograms.

METHODS: We illustrate the functioning of this application through a use-case scenario occurring in an Italian hospital, and describe the corresponding key encryption and key management used to guarantee data security. We used the X[2] test or the unpaired Student t test to perform pre-post comparisons of several indexes, in order to evaluate significant changes after the introduction of ReportFlow.
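A hedged sketch of pre-post comparisons of this kind with scipy; the reporting-time samples and contingency table below are invented for illustration, not the study's data.

```python
# Pre-post comparison sketch: unpaired Student t test plus a chi-squared
# test on a hypothetical delivery-mode x period table (invented numbers).
import numpy as np
from scipy import stats

rng = np.random.default_rng(0)
pre = rng.normal(48.0, 10.0, 200)    # minutes per report, before ReportFlow
post = rng.normal(31.0, 8.0, 200)    # minutes per report, after ReportFlow
t, p = stats.ttest_ind(pre, post)    # unpaired Student t test
print(f"t = {t:.2f}, p = {p:.3g}")

table = [[120, 80],                  # paper vs digital deliveries, before
         [64, 136]]                  # paper vs digital deliveries, after
chi2, p2, dof, _ = stats.chi2_contingency(table)
print(f"chi2 = {chi2:.2f} (dof {dof}), p = {p2:.3g}")
```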

RESULTS: The results obtained with ReportFlow show a reduction in the time needed for exam reporting (t = 19.94; p < 0.001) and delivery (t = 14.95; p < 0.001), as well as an increase of about 20% in the number of neurophysiologic examinations performed, while guaranteeing data integrity and security. Moreover, 68% of exam reports were delivered completely digitally.

CONCLUSIONS: The application proved to be an effective solution for optimizing the legacy process adopted in this scenario. The comparative pre-post analysis showed promising preliminary performance results. Future work will address the automatic creation and release of certificates.}, } @article {pmid33406662, year = {2021}, author = {Li, J and Qiao, Z and Zhang, K and Cui, C}, title = {A Lattice-Based Homomorphic Proxy Re-Encryption Scheme with Strong Anti-Collusion for Cloud Computing.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {1}, pages = {}, pmid = {33406662}, issn = {1424-8220}, support = {LH2020F044//Heilongjiang Provincial Natural Science Foundation of China/ ; }, abstract = {The homomorphic proxy re-encryption scheme combines the characteristics of a homomorphic encryption scheme and a proxy re-encryption scheme. The proxy can not only convert a ciphertext of the delegator into a ciphertext of the delegatee, but can also homomorphically calculate on the original ciphertext and the re-encrypted ciphertext belonging to the same user, so it is especially suitable for cloud computing. Yin et al. put forward the concept of a strong collusion attack on proxy re-encryption schemes, and demonstrated such an attack on an example scheme. The existing homomorphic proxy re-encryption schemes use key-switching algorithms to generate re-encryption keys, so they cannot resist strong collusion attacks. In this paper, we construct the first lattice-based homomorphic proxy re-encryption scheme with strong anti-collusion (HPRE-SAC). Firstly, the algorithm TrapGen is used to generate an encryption key and a trapdoor; then trapdoor sampling is used to generate a decryption key and a re-encryption key, respectively. Finally, in order to ensure the homomorphism of the ciphertext, a key-switching algorithm is used only to generate the evaluation key. Compared with the existing homomorphic proxy re-encryption schemes, our HPRE-SAC scheme can not only resist strong collusion attacks, but also has smaller parameters.}, } @article {pmid33404529, year = {2021}, author = {Coelho, AA}, title = {Ab initio structure solution of proteins at atomic resolution using charge-flipping techniques and cloud computing.}, journal = {Acta crystallographica. Section D, Structural biology}, volume = {77}, number = {Pt 1}, pages = {98-107}, doi = {10.1107/S2059798320015090}, pmid = {33404529}, issn = {2059-7983}, mesh = {*Cloud Computing ; Internet ; Protein Conformation ; Proteins/*chemistry ; *Software ; }, abstract = {Large protein structures at atomic resolution can be solved in minutes using charge-flipping techniques operating on hundreds of virtual machines (computers) on the Amazon Web Services cloud-computing platform driven by the computer programs TOPAS or TOPAS-Academic at a small financial cost. The speed of operation has allowed charge-flipping techniques to be investigated and modified, leading to two strategies that can solve a large range of difficult protein structures at atomic resolution. Techniques include the use of space-group symmetry restraints on the electron density as well as increasing the intensity of a randomly chosen high-intensity electron-density peak. It is also shown that the use of symmetry restraints increases the chance of finding a solution for low-resolution data.
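As a concrete illustration of the basic cycle behind these charge-flipping techniques, here is a minimal numpy sketch on synthetic data; the threshold, grid size, and iteration count are invented, and the 'uranium atom' workaround mentioned next is not modeled.

```python
# Toy charge-flipping cycle on synthetic data; real use starts from
# measured structure-factor magnitudes, not random stand-ins.
import numpy as np

rng = np.random.default_rng(1)
n = 32
f_obs = np.abs(np.fft.fftn(rng.random((n, n, n))))  # stand-in |F| data
rho = rng.random((n, n, n))                         # random starting density

delta = 0.1 * rho.std()                             # small flipping threshold
for _ in range(50):
    rho_flip = np.where(rho < delta, -rho, rho)     # flip weak density
    F = np.fft.fftn(rho_flip)
    phases = np.exp(1j * np.angle(F))               # keep computed phases
    rho = np.fft.ifftn(f_obs * phases).real         # impose observed |F|
```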
Finally, a flipping strategy that negates 'uranium atom solutions' has been developed for structures that exhibit such solutions during charge flipping.}, } @article {pmid33401409, year = {2021}, author = {Tian, X and Zhu, J and Xu, T and Li, Y}, title = {Mobility-Included DNN Partition Offloading from Mobile Devices to Edge Clouds.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {1}, pages = {}, pmid = {33401409}, issn = {1424-8220}, abstract = {The latest results in Deep Neural Networks (DNNs) have greatly improved the accuracy and performance of a variety of intelligent applications. However, running such computation-intensive DNN-based applications on resource-constrained mobile devices leads to long latency and huge energy consumption. The traditional approach is to run DNNs in the central cloud, but this requires significant amounts of data to be transferred to the cloud over the wireless network and also results in long latency. To solve this problem, offloading partial DNN computation to edge clouds has been proposed to realize collaborative execution between mobile devices and edge clouds. In addition, the mobility of mobile devices can easily cause computation offloading to fail. In this paper, we develop a mobility-included DNN partition offloading algorithm (MDPO) to adapt to the user's mobility. The objective of MDPO is to minimize the total latency of completing a DNN job when the mobile user is moving. The MDPO algorithm is suitable for DNNs with both chain and graph topologies. We evaluate the performance of the proposed MDPO against local-only and edge-only execution; experiments show that MDPO significantly reduces total latency, improves DNN performance, and adapts well to different network conditions.}, } @article {pmid33399819, year = {2021}, author = {Yun, T and Li, H and Chang, PC and Lin, MF and Carroll, A and McLean, CY}, title = {Accurate, scalable cohort variant calls using DeepVariant and GLnexus.}, journal = {Bioinformatics (Oxford, England)}, volume = {36}, number = {24}, pages = {5582-5589}, pmid = {33399819}, issn = {1367-4811}, support = {U01 HG007301/HG/NHGRI NIH HHS/United States ; U01 HG007417/HG/NHGRI NIH HHS/United States ; UM1 HG008901/HG/NHGRI NIH HHS/United States ; 3UM1HG008901-03S1/HG/NHGRI NIH HHS/United States ; //Google LLC/ ; }, abstract = {MOTIVATION: Population-scale sequenced cohorts are foundational resources for genetic analyses, but processing raw reads into analysis-ready cohort-level variants remains challenging.
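To make the partition idea in the MDPO entry above concrete, here is a toy sketch of choosing a cut layer that minimizes device time plus transfer time plus edge time; all layer timings, sizes, and the bandwidth are hypothetical, and the paper's mobility handling is not modeled.

```python
# Pick the DNN partition point (cut) that minimizes total latency for a
# chain-topology network; every number below is invented for illustration.
device_ms = [4.0, 6.0, 9.0, 3.0, 2.0]   # per-layer time on the mobile device
edge_ms = [1.0, 1.5, 2.0, 0.8, 0.5]     # per-layer time on the edge cloud
out_kb = [512, 256, 64, 32, 8]          # activation size after each layer
in_kb = 1024                            # input size shipped when cut == 0
bw_kbps = 2000.0                        # current uplink bandwidth

def total_latency(cut):
    """Run layers [0, cut) on the device, ship one tensor, finish on edge."""
    local = sum(device_ms[:cut])
    payload = in_kb if cut == 0 else out_kb[cut - 1]
    transfer = payload / bw_kbps * 1000.0   # transfer time in ms
    remote = sum(edge_ms[cut:])
    return local + transfer + remote

best = min(range(len(device_ms) + 1), key=total_latency)
print(f"cut after layer {best}: {total_latency(best):.1f} ms")
```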

RESULTS: We introduce an open-source cohort-calling method that uses the highly accurate caller DeepVariant and scalable merging tool GLnexus. Using callset quality metrics based on variant recall and precision in benchmark samples and Mendelian consistency in father-mother-child trios, we optimize the method across a range of cohort sizes, sequencing methods and sequencing depths. The resulting callsets show consistent quality improvements over those generated using existing best practices with reduced cost. We further evaluate our pipeline in the deeply sequenced 1000 Genomes Project (1KGP) samples and show superior callset quality metrics and imputation reference panel performance compared to an independently generated GATK Best Practices pipeline.
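A hedged sketch of the trio-based Mendelian-consistency metric mentioned above; the genotype encoding and example sites are invented for illustration.

```python
# A child call is Mendelian-consistent if one allele can come from each
# parent; genotypes are allele tuples, e.g. (0, 1) for a heterozygous call.
def mendelian_consistent(father, mother, child):
    target = tuple(sorted(child))
    return any(tuple(sorted((f, m))) == target
               for f in father for m in mother)

trio_sites = [((0, 1), (0, 0), (0, 1)),   # consistent
              ((0, 0), (0, 0), (1, 1))]   # Mendelian violation
rate = sum(mendelian_consistent(*s) for s in trio_sites) / len(trio_sites)
print(f"consistency rate: {rate:.2f}")
```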

We publicly release the 1KGP individual-level variant calls and cohort callset (https://console.cloud.google.com/storage/browser/brain-genomics-public/research/cohort/1KGP) to foster additional development and evaluation of cohort merging methods as well as broad studies of genetic variation. Both DeepVariant (https://github.com/google/deepvariant) and GLnexus (https://github.com/dnanexus-rnd/GLnexus) are open-source, and the optimized GLnexus setup discovered in this study is also integrated into GLnexus public releases v1.2.2 and later.

SUPPLEMENTARY INFORMATION: Supplementary data are available at Bioinformatics online.}, } @article {pmid33399126, year = {2021}, author = {Yang, L and Culbertson, EA and Thomas, NK and Vuong, HT and Kjær, ETS and Jensen, KMØ and Tucker, MG and Billinge, SJL}, title = {A cloud platform for atomic pair distribution function analysis: PDFitc.}, journal = {Acta crystallographica. Section A, Foundations and advances}, volume = {77}, number = {Pt 1}, pages = {2-6}, pmid = {33399126}, issn = {2053-2733}, support = {DMREF-1534910//National Science Foundation, Division of Materials Research/ ; DE-AC05-00OR22725//U.S. Department of Energy, Neutron Science Directorate/ ; 804066//H2020 European Research Council/ ; }, abstract = {A cloud web platform for analysis and interpretation of atomic pair distribution function (PDF) data (PDFitc) is described. The platform is able to host applications for PDF analysis to help researchers study the local and nanoscale structure of nanostructured materials. The applications are designed to be powerful and easy to use and can, and will, be extended over time through community adoption and development. The currently available PDF analysis applications, structureMining, spacegroupMining and similarityMapping, are described. In the first and second the user uploads a single PDF and the application returns a list of best-fit candidate structures, and the most likely space group of the underlying structure, respectively. In the third, the user can upload a set of measured or calculated PDFs and the application returns a matrix of Pearson correlations, allowing assessment of the similarity between different data sets. structureMining is presented here as an example to show the easy-to-use workflow on PDFitc. In the future, as well as using the PDFitc applications for data analysis, it is hoped that the community will contribute their own codes and software to the platform.}, } @article {pmid33395689, year = {2021}, author = {Lima, MS}, title = {Information theory inspired optimization algorithm for efficient service orchestration in distributed systems.}, journal = {PloS one}, volume = {16}, number = {1}, pages = {e0242285}, pmid = {33395689}, issn = {1932-6203}, mesh = {*Algorithms ; *Computer Communication Networks ; *Computer Simulation ; *Data Management ; Information Theory ; }, abstract = {Distributed Systems architectures are becoming the standard computational model for processing and transportation of information, especially for Cloud Computing environments. The increase in demand for application processing and data management from enterprise and end-user workloads continues to move from a single-node client-server architecture to a distributed multitier design where data processing and transmission are segregated. Software development must consider the orchestration required to provision its core components in order to deploy the services efficiently in many independent, loosely coupled, physically and virtually interconnected data centers spread geographically across the globe. This network routing challenge can be modeled as a variation of the Travelling Salesman Problem (TSP). This paper proposes a new optimization algorithm for optimum route selection using Algorithmic Information Theory. The Kelly criterion for a Shannon-Bernoulli process is used to generate a reliable quantitative algorithm to find a near-optimal solution tour. The algorithm is then verified by comparing the results with benchmark heuristic solutions in 3 test cases.
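Before this entry's statistical analysis, which continues below, a hedged sketch of the Kelly fraction it invokes for a Bernoulli process; the win probability and odds are illustrative, not taken from the paper.

```python
# Kelly criterion for a Bernoulli process: the stake fraction that
# maximizes long-run logarithmic growth.
def kelly_fraction(p, b):
    """Win probability p, net odds b (a win returns b times the stake)."""
    return p - (1.0 - p) / b

print(kelly_fraction(0.6, 1.0))  # 0.2: commit 20% per decision step
```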
A statistical analysis is designed to measure the significance of the differences between the algorithms, and the entropy function can be derived from the distribution. The test results showed an improvement in solution quality, producing routes with smaller length and time requirements. The quality of the results demonstrates the flexibility of the proposed algorithm for problems of different complexities without relying on nature-inspired models such as Genetic Algorithms, Ant Colony, Cross Entropy, Neural Networks, 2-opt and Simulated Annealing. The proposed algorithm can be used by applications to deploy services across large clusters of nodes by making better decisions in route design. The findings in this paper unify critical areas of Computer Science, Mathematics and Statistics that many researchers have not explored, and provide a new interpretation that advances the understanding of the role of entropy in decision problems encoded in Turing machines.}, } @article {pmid33394397, year = {2021}, author = {Khan, R and Gilani, H}, title = {Global drought monitoring with big geospatial datasets using Google Earth Engine.}, journal = {Environmental science and pollution research international}, volume = {28}, number = {14}, pages = {17244-17264}, pmid = {33394397}, issn = {1614-7499}, mesh = {Australia ; Brazil ; *Droughts ; *Meteorology ; Thailand ; }, abstract = {Drought or dryness occurs due to the cumulative effect of certain climatological and hydrological variables over a certain period. Droughts are studied through numerically computed simple or compound indices. The vegetation condition index (VCI) is used to observe the vegetation changes associated with agricultural drought. Since land surface temperature is minimally influenced by cloud contamination and atmospheric humidity, the temperature condition index (TCI) is used to study temperature change. Soil dryness or wetness is a major indicator of agricultural and hydrological drought, and for that purpose the soil moisture condition index (SMCI) is computed. The deviation of precipitation from normal is a major cause of meteorological drought, and for that purpose the precipitation condition index (PCI) is computed. The years in which the indices escalated dryness to severe and extreme levels are identified in this research. Furthermore, an interactive dashboard is generated in the Google Earth Engine (GEE) for users to compute these indices using a country boundary, time period, and ecological mask of their choice: Agriculture Drought Monitoring. Apart from global results, three case studies of droughts (2002 in Australia, 2013 in Brazil, and 2019 in Thailand) computed via the dashboard are discussed in detail in this research.}, } @article {pmid33389466, year = {2021}, author = {Yadav, S and Luthra, S and Garg, D}, title = {Modelling Internet of things (IoT)-driven global sustainability in multi-tier agri-food supply chain under natural epidemic outbreaks.}, journal = {Environmental science and pollution research international}, volume = {28}, number = {13}, pages = {16633-16654}, pmid = {33389466}, issn = {1614-7499}, mesh = {*COVID-19 ; Disease Outbreaks ; *Epidemics ; Food Supply ; Humans ; *Internet of Things ; SARS-CoV-2 ; }, abstract = {An epidemic outbreak (COVID-19, SARS-CoV-2) is an exceptional agri-food supply chain (AFSC) risk scenario at the globalised level, characterised by logistics network breakdown (ripple effects), demand mismatch (uncertainty), and sustainability issues.
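Returning to the Khan and Gilani drought entry above, a hedged numpy sketch of the VCI computation it describes; the NDVI series is synthetic, and the agri-food abstract resumes after the sketch.

```python
# VCI = 100 * (NDVI - NDVI_min) / (NDVI_max - NDVI_min), per pixel, where
# the min and max are taken over the historical record for that pixel.
import numpy as np

rng = np.random.default_rng(2)
ndvi = rng.uniform(0.1, 0.9, size=(120, 64, 64))  # months x height x width
ndvi_min, ndvi_max = ndvi.min(axis=0), ndvi.max(axis=0)

def vci(ndvi_month):
    """Vegetation condition index for one monthly NDVI image."""
    return 100.0 * (ndvi_month - ndvi_min) / (ndvi_max - ndvi_min + 1e-9)

print(vci(ndvi[0]).mean())
```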
Thus, the aim of this research is to model a sustainability-based multi-tier AFSC system managed through different emerging applications of Internet of Things (IoT) technology. IoT technologies, viz., blockchain, robotics, big data analysis, and cloud computing, have made AFSC competitive at the global level. A competitive AFSC needs careful incorporation of multi-tier suppliers, specifically when dealing with globalised sustainability issues. Firms have been reaching out to their multiple suppliers to drive social, environmental, and economic practices. This paper also studies the interrelationships of 14 enablers and their cause-and-effect magnitudes as contributors to an IoT-based food-security model. The methodology used in the paper is interpretive structural modelling (ISM) to establish the interrelationships among the enablers, and the Fuzzy Decision-Making Trial and Evaluation Laboratory (F-DEMATEL) to provide the magnitude of the cause-effect strength of the hierarchical framework. This paper also provides theoretical contributions supported by information processing theory (IPT) and dynamic capability theory (DCT). It may guide managers in their strategic planning based on the classification of enablers into cause and effect groups, and may also encourage managers to implement IoT technologies in AFSC.}, } @article {pmid33382884, year = {2020}, author = {Khomtchouk, BB and Nelson, CS and Vand, KA and Palmisano, S and Grossman, RL}, title = {HeartBioPortal2.0: new developments and updates for genetic ancestry and cardiometabolic quantitative traits in diverse human populations.}, journal = {Database : the journal of biological databases and curation}, volume = {2020}, number = {}, pages = {}, pmid = {33382884}, issn = {1758-0463}, support = {T32 AG047126/AG/NIA NIH HHS/United States ; }, mesh = {*Cardiovascular Diseases/genetics ; Female ; Genetic Predisposition to Disease ; *Genome-Wide Association Study ; Genomics ; Humans ; Male ; Phenotype ; }, abstract = {Cardiovascular disease (CVD) is the leading cause of death worldwide for all genders and across most racial and ethnic groups. However, different races and ethnicities exhibit different rates of CVD and its related cardiorenal and metabolic comorbidities, suggesting differences in genetic predisposition and risk of onset, as well as socioeconomic and lifestyle factors (diet, exercise, etc.) that act upon an individual's unique underlying genetic background. Here, we present HeartBioPortal2.0, a major update to HeartBioPortal, the world's largest CVD genetics data precision medicine platform for harmonized CVD-relevant genetic variants, which now enables search and analysis of human genetic information related to heart disease across ethnically diverse populations and cardiovascular/renal/metabolic quantitative traits pertinent to CVD pathophysiology. HeartBioPortal2.0 is structured as a cloud-based computing platform and knowledge portal that consolidates a multitude of CVD-relevant genomic data modalities into a single powerful query and browsing interface between data and user via a user-friendly web application publicly available to the scientific research community.
Since its initial release, HeartBioPortal2.0 has added new cardiovascular/renal/metabolic disease-relevant gene expression data as well as genetic association data from numerous large-scale genome-wide association study consortiums such as CARDIoGRAMplusC4D, TOPMed, FinnGen, AFGen, MESA, MEGASTROKE, UK Biobank, CHARGE, Biobank Japan and MyCode, among other studies. In addition, HeartBioPortal2.0 now includes support for quantitative traits and ethnically diverse populations, allowing users to investigate the shared genetic architecture of any gene or its variants across the continuous cardiometabolic spectrum from health (e.g. blood pressure traits) to disease (e.g. hypertension), facilitating the understanding of CVD trait genetics that inform health-to-disease transitions and endophenotypes. Custom visualizations in the new and improved user interface, including performance enhancements and new security features such as user authentication, collectively re-imagine HeartBioPortal's user experience and provide a data commons that co-locates data, storage and computing infrastructure in the context of studying the genetic basis behind the leading cause of global mortality. Database URL: https://www.heartbioportal.com/.}, } @article {pmid33378901, year = {2020}, author = {Halty, A and Sánchez, R and Vázquez, V and Viana, V and Piñeyro, P and Rossit, DA}, title = {Scheduling in cloud manufacturing systems: Recent systematic literature review.}, journal = {Mathematical biosciences and engineering : MBE}, volume = {17}, number = {6}, pages = {7378-7397}, doi = {10.3934/mbe.2020377}, pmid = {33378901}, issn = {1551-0018}, abstract = {Cloud Manufacturing (CMfg) is a novel production paradigm that benefits from Cloud Computing in order to develop manufacturing systems linked by the cloud. These systems, based on virtual platforms, allow direct linkage between customers and suppliers of manufacturing services, regardless of geographical distance. In this way, CMfg can expand both the markets for producers and the pool of suppliers for customers. However, these linkages imply a new challenge for the production planning and decision-making process, especially in scheduling. In this paper, a systematic literature review of articles addressing scheduling in Cloud Manufacturing environments is carried out. The review takes as its starting point a seminal study published in 2019, in which all problem features are described in detail. We pay special attention to the optimization methods and problem-solving strategies that have been suggested in CMfg scheduling. From the review carried out, we can assert that CMfg is a topic of growing interest within the scientific community. We also conclude that the methods based on bio-inspired metaheuristics are by far the most widely used (they represent more than 50% of the articles found). On the other hand, we suggest some lines for future research to further consolidate this field. In particular, we want to highlight the multi-objective approach, since due to the nature of the problem and the production paradigm, the optimization objectives involved are generally in conflict.
In addition, decentralized approaches such as those based on game theory are promising lines for future research.}, } @article {pmid33374965, year = {2020}, author = {Sahlmann, K and Clemens, V and Nowak, M and Schnor, B}, title = {MUP: Simplifying Secure Over-The-Air Update with MQTT for Constrained IoT Devices.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {1}, pages = {}, pmid = {33374965}, issn = {1424-8220}, abstract = {Message Queuing Telemetry Transport (MQTT) is one of the dominant protocols for edge- and cloud-based Internet of Things (IoT) solutions. When a security vulnerability of an IoT device is known, it has to be fixed as soon as possible. This requires a firmware update procedure. In this paper, we propose a secure update protocol for MQTT-connected devices which ensures the freshness of the firmware, authenticates the new firmware and considers constrained devices. We show that the update protocol is easy to integrate in an MQTT-based IoT network using a semantic approach. The feasibility of our approach is demonstrated by a detailed performance analysis of our prototype implementation on an IoT device with 32 kB RAM. Thereby, we identify design issues in MQTT 5 which can help to improve the support of constrained devices.}, } @article {pmid33374599, year = {2020}, author = {Asif, R and Ghanem, K and Irvine, J}, title = {Proof-of-PUF Enabled Blockchain: Concurrent Data and Device Security for Internet-of-Energy.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {1}, pages = {}, pmid = {33374599}, issn = {1424-8220}, abstract = {A detailed review on the technological aspects of Blockchain and Physical Unclonable Functions (PUFs) is presented in this article. It stipulates an emerging concept of Blockchain that integrates hardware security primitives via PUFs to solve bandwidth, integration, scalability, latency, and energy requirements for Internet-of-Energy (IoE) systems. This hybrid approach, hereinafter termed PUFChain, provides device and data provenance, recording data origins, the history of data generation and processing, and clone-proof device identification and authentication, thus making it possible to track the sources and causes of any cyber attack. In addition to this, we review the key areas of design, development, and implementation, which give insight into seamless integration with legacy IoE systems, reliability, cyber resilience, and future research challenges.}, } @article {pmid33374340, year = {2020}, author = {Lin, HY and Hung, YM}, title = {An Improved Proxy Re-Encryption Scheme for IoT-Based Data Outsourcing Services in Clouds.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {1}, pages = {}, pmid = {33374340}, issn = {1424-8220}, abstract = {IoT-based data outsourcing services in clouds could be regarded as a new trend in recent years, as they reduce hardware and software costs for enterprises and provide higher flexibility. To securely transfer an encrypted message in the cloud, a so-called proxy re-encryption scheme is a better alternative. In such schemes, a ciphertext designated for one data aggregation can be re-encrypted by a semi-trusted proxy, without decryption, into one designated for another. In this paper, we introduce a secure proxy re-encryption protocol for IoT-based data outsourcing services in clouds. The proposed scheme is provably secure assuming the hardness of the bilinear inverse Diffie-Hellman problem (BIDHP).
In particular, our scheme is bidirectional and supports multi-hop functionality, which allows an uploaded ciphertext to be transformed into a different one multiple times. The ciphertext length of our method is independent of the number of involved IoT nodes. Specifically, the re-encryption process takes only one exponentiation computation, which is around 54 ms when sharing the data with 100 IoT devices. For each IoT node, the decryption process requires only two exponentiation computations. When compared with a related protocol presented by Kim and Lee, the proposed one also exhibits lower computational costs.}, } @article {pmid33374270, year = {2020}, author = {Abbas, Q and Alsheddy, A}, title = {Driver Fatigue Detection Systems Using Multi-Sensors, Smartphone, and Cloud-Based Computing Platforms: A Comparative Analysis.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {1}, pages = {}, pmid = {33374270}, issn = {1424-8220}, support = {0001-008-11-17-3//King Abdulaziz City for Science and Technology/ ; }, mesh = {*Automobile Driving ; Cloud Computing ; *Internet of Things ; Machine Learning ; *Monitoring, Physiologic ; *Smartphone ; }, abstract = {Internet of things (IoT) cloud-based applications deliver advanced solutions for smart cities to decrease traffic accidents caused by driver fatigue on the road. Environmental conditions or driver behavior can ultimately lead to serious roadside accidents. In recent years, researchers have developed many low-cost, computerized driver fatigue detection systems (DFDs) to help drivers, using multi-sensor, mobile, and cloud-based computing architectures. These are the most current emerging platforms introduced to promote safe driving. In this paper, we reviewed state-of-the-art approaches for predicting unsafe driving styles using three common IoT-based architectures. The novelty of this article is to show major differences among multi-sensor, smartphone-based, and cloud-based architectures in multimodal feature processing. We discussed the problems that machine learning techniques, particularly deep learning (DL) models, have faced in recent years in predicting driver hypovigilance, especially in terms of these three IoT-based architectures. Moreover, we performed state-of-the-art comparisons by using driving simulators to incorporate multimodal features of the driver. We also mention online data sources for testing and training network architectures in the field of DFDs on publicly available multimodal datasets. These comparisons will assist other researchers in continuing work in this domain. To evaluate performance, we discuss the major problems in these three architectures to help researchers choose the best IoT-based architecture for detecting driver fatigue in a real-time environment. Moreover, the important factors of Multi-Access Edge Computing (MEC) and 5th generation (5G) networks are analyzed in the context of deep learning architecture to improve the response time of DFD systems.
Lastly, it is concluded that there is a research gap when it comes to implementing DFD systems on MEC and 5G technologies using multimodal features and DL architectures.}, } @article {pmid33371361, year = {2020}, author = {Alankar, B and Sharma, G and Kaur, H and Valverde, R and Chang, V}, title = {Experimental Setup for Investigating the Efficient Load Balancing Algorithms on Virtual Cloud.}, journal = {Sensors (Basel, Switzerland)}, volume = {20}, number = {24}, pages = {}, pmid = {33371361}, issn = {1424-8220}, support = {5753/ IFD/ 2015-16//National Council for Science and Technology Communications (NCSTC), Department of Science and Technology (DST), Ministry of Science and Technology (Govt. of India), New Delhi, India/ ; VCR 0000110//VC Research/ ; }, abstract = {Cloud computing has emerged as the primary choice for developers in developing applications that require high-performance computing. Virtualization technology has helped in the distribution of resources to multiple users. Increased use of cloud infrastructure has led to the challenge of developing a load balancing mechanism to provide optimized use of resources and better performance. Round robin and least connections load balancing algorithms have been developed to allocate user requests across a cluster of servers in the cloud in a time-bound manner. In this paper, we have applied the round robin and least connections approaches to load balancing with HAProxy, virtual machine clusters and web servers. The experimental results are visualized and summarized using Apache JMeter, and a further comparative study of round robin and least connections is also depicted. The experimental setup and results show that the round robin algorithm performs better than the least connections algorithm on all load balancer parameters measured in this paper.}, } @article {pmid33362806, year = {2020}, author = {Das Choudhury, S and Maturu, S and Samal, A and Stoerger, V and Awada, T}, title = {Leveraging Image Analysis to Compute 3D Plant Phenotypes Based on Voxel-Grid Plant Reconstruction.}, journal = {Frontiers in plant science}, volume = {11}, number = {}, pages = {521431}, pmid = {33362806}, issn = {1664-462X}, abstract = {High throughput image-based plant phenotyping facilitates the extraction of morphological and biophysical traits of a large number of plants non-invasively in a relatively short time. It facilitates the computation of advanced phenotypes by considering the plant as a single object (holistic phenotypes) or its components, i.e., leaves and the stem (component phenotypes). The architectural complexity of plants increases over time due to variations in self-occlusions and phyllotaxy, i.e., arrangements of leaves around the stem. One of the central challenges to computing phenotypes from 2-dimensional (2D) single view images of plants, especially at the advanced vegetative stage in the presence of self-occluding leaves, is that the information captured in 2D images is incomplete, and hence, the computed phenotypes are inaccurate. We introduce a novel algorithm to compute 3-dimensional (3D) plant phenotypes from multiview images using voxel-grid reconstruction of the plant (3DPhenoMV). The paper also presents a novel method to reliably detect and separate the individual leaves and the stem from the 3D voxel-grid of the plant using voxel overlapping consistency check and point cloud clustering techniques.
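For the Alankar et al. load-balancing entry above, a toy sketch of the two selection policies it compares; server names and connection counts are made up, and the plant-phenotyping abstract resumes below.

```python
# Round robin rotates through servers in order; least connections picks
# the server with the fewest open connections at decision time.
from itertools import cycle

servers = ["web1", "web2", "web3"]
active = {"web1": 4, "web2": 1, "web3": 7}   # hypothetical open connections

rr = cycle(servers)

def pick_round_robin():
    return next(rr)

def pick_least_connections():
    return min(servers, key=lambda s: active[s])

print(pick_round_robin(), pick_least_connections())  # web1 web2
```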
To evaluate the performance of the proposed algorithm, we introduce the University of Nebraska-Lincoln 3D Plant Phenotyping Dataset (UNL-3DPPD). A generic taxonomy of 3D image-based plant phenotypes is also presented to promote 3D plant phenotyping research. A subset of these phenotypes is computed using computer vision algorithms, with discussion of their significance in the context of plant science. The central contributions of the paper are (a) an algorithm for 3D voxel-grid reconstruction of maize plants at the advanced vegetative stages using images from multiple 2D views; (b) a generic taxonomy of 3D image-based plant phenotypes and a public benchmark dataset, i.e., UNL-3DPPD, to promote the development of 3D image-based plant phenotyping research; and (c) novel voxel overlapping consistency check and point cloud clustering techniques to detect and isolate individual leaves and the stem of maize plants to compute the component phenotypes. Detailed experimental analyses demonstrate the efficacy of the proposed method, and also show the potential of 3D phenotypes to explain the morphological characteristics of plants regulated by genetic and environmental interactions.}, } @article {pmid33348559, year = {2020}, author = {Chen, B and Chen, H and Yuan, D and Yu, L}, title = {3D Fast Object Detection Based on Discriminant Images and Dynamic Distance Threshold Clustering.}, journal = {Sensors (Basel, Switzerland)}, volume = {20}, number = {24}, pages = {}, pmid = {33348559}, issn = {1424-8220}, support = {2018YFB1201602//National Key Research and Development Plan/ ; 61976224//Natural Science Foundation of China/ ; 2018JJ3689//Natural Science Foundation of Hunan Province of China/ ; }, abstract = {The object detection algorithm based on vehicle-mounted lidar is a key component of the perception system on autonomous vehicles. It can provide high-precision and highly robust obstacle information for the safe driving of autonomous vehicles. However, most algorithms are often based on a large amount of point cloud data, which makes real-time detection difficult. To solve this problem, this paper proposes a 3D fast object detection method based on three main steps: First, the ground segmentation by discriminant image (GSDI) method is used to convert point cloud data into discriminant images for ground point segmentation, which avoids computing directly on the point cloud data and improves the efficiency of ground point segmentation. Second, an image detector is used to generate the region of interest of the three-dimensional object, which effectively narrows the search range. Finally, the dynamic distance threshold clustering (DDTC) method is designed for the varying density of the point cloud data, which improves the detection of long-distance objects and avoids the over-segmentation produced by traditional algorithms.
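In the spirit of the DDTC step just described, a toy flood-fill clustering sketch whose merge distance grows with range; this is an illustrative rule on a synthetic point cloud, not the paper's exact formulation, and the abstract's concluding sentence follows.

```python
# Cluster lidar-like points with a range-dependent distance threshold, so
# sparse far-field returns are not over-segmented.
import numpy as np

def ddtc_like(points, base=0.4, k=0.05):
    n = len(points)
    labels = np.full(n, -1)
    current = 0
    for seed in range(n):
        if labels[seed] != -1:
            continue
        stack = [seed]
        labels[seed] = current
        while stack:
            i = stack.pop()
            thr = base + k * np.linalg.norm(points[i])   # grows with range
            d = np.linalg.norm(points - points[i], axis=1)
            for j in np.where((d < thr) & (labels == -1))[0]:
                labels[j] = current
                stack.append(j)
        current += 1
    return labels

pts = np.random.default_rng(3).uniform(-20, 20, size=(200, 3))
print(ddtc_like(pts).max() + 1, "clusters")
```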
Experiments have shown that this algorithm can meet the real-time requirements of autonomous driving while maintaining high accuracy.}, } @article {pmid33343851, year = {2020}, author = {Bibi, N and Sikandar, M and Ud Din, I and Almogren, A and Ali, S}, title = {IoMT-Based Automated Detection and Classification of Leukemia Using Deep Learning.}, journal = {Journal of healthcare engineering}, volume = {2020}, number = {}, pages = {6648574}, pmid = {33343851}, issn = {2040-2309}, mesh = {Algorithms ; COVID-19/epidemiology ; Cloud Computing ; Databases, Factual ; *Deep Learning ; *Diagnosis, Computer-Assisted ; Diagnostic Imaging ; Humans ; *Internet of Things ; Leukemia/*classification/*diagnosis ; Leukemia, Lymphocytic, Chronic, B-Cell/diagnosis ; Leukemia, Myelogenous, Chronic, BCR-ABL Positive/diagnosis ; Leukemia, Myeloid, Acute/diagnosis ; Machine Learning ; Neural Networks, Computer ; *Pattern Recognition, Automated ; Precursor Cell Lymphoblastic Leukemia-Lymphoma/diagnosis ; Telemedicine ; }, abstract = {For the last few years, computer-aided diagnosis (CAD) has been advancing rapidly. Numerous machine learning algorithms have been developed to identify different diseases, e.g., leukemia. Leukemia is a white blood cell- (WBC-) related illness affecting the bone marrow and/or blood. A quick, safe, and accurate early-stage diagnosis of leukemia plays a key role in curing and saving patients' lives. Based on how it develops, leukemia consists of two primary forms, i.e., acute and chronic leukemia, and each form can be subcategorized as myeloid or lymphoid. There are, therefore, four leukemia subtypes. Various approaches have been developed to identify leukemia with respect to its subtypes. However, in terms of effectiveness, learning process, and performance, these methods require improvement. This study provides an Internet of Medical Things- (IoMT-) based framework to enable quick and safe identification of leukemia. In the proposed IoMT system, with the help of cloud computing, clinical gadgets are linked to network resources. The system allows real-time coordination for testing, diagnosis, and treatment of leukemia among patients and healthcare professionals, which may save both the time and effort of patients and clinicians. Moreover, the presented framework is also helpful for resolving the problems of patients in critical condition during pandemics such as COVID-19. The methods used for the identification of leukemia subtypes in the suggested framework are the Dense Convolutional Neural Network (DenseNet-121) and the Residual Convolutional Neural Network (ResNet-34). Two publicly available datasets for leukemia, i.e., ALL-IDB and the ASH image bank, are used in this study. The results demonstrate that the suggested models outperform other well-known machine learning algorithms used for healthy-versus-leukemia-subtype identification.}, } @article {pmid33333717, year = {2020}, author = {Khorsheed, MB and Zainel, QM and Hassen, OA and Darwish, SM}, title = {The Application of Fractal Transform and Entropy for Improving Fault Tolerance and Load Balancing in Grid Computing Environments.}, journal = {Entropy (Basel, Switzerland)}, volume = {22}, number = {12}, pages = {}, pmid = {33333717}, issn = {1099-4300}, abstract = {This paper applies an entropy-based fractal indexing scheme that enables fast indexing and querying in grid environments.
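Looking back to the leukemia entry above, a hedged sketch of a DenseNet-121 transfer-learning setup of the kind it names; the four-class head and frozen backbone are assumptions for illustration, not the paper's published configuration. The fractal-indexing abstract resumes below.

```python
# Load an ImageNet-pretrained DenseNet-121 (downloads weights on first use)
# and replace its classifier with a head for the four leukemia subtypes.
import torch.nn as nn
import torchvision.models as models

model = models.densenet121(weights="IMAGENET1K_V1")
model.classifier = nn.Linear(model.classifier.in_features, 4)

for p in model.features.parameters():   # optionally freeze the backbone
    p.requires_grad = False
```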
It addresses fault tolerance and fractal-based load balancing management to make computational grids more effective and reliable. The fractal dimension of a cloud of points gives an estimate of the intrinsic dimensionality of the data in that space. The main drawback of this technique is the long computing time. The main contribution of the suggested work is to investigate the effect of the fractal transform by adding R-tree index structure-based entropy to existing grid computing models to obtain a balanced infrastructure with minimal faults. In this regard, the presented work extends the common scheduling algorithms built on the physical grid structure to a reduced logical network. The objective of this logical network is to reduce searching in the grid paths according to arrival rate and path bandwidth, with respect to load balance and fault tolerance, respectively. Furthermore, an optimization search technique is utilized to enhance grid performance by investigating the optimum number of nodes extracted from the logical grid. The experimental results indicated that the proposed model achieves better execution time, throughput, makespan, latency, load balancing, and success rate.}, } @article {pmid33332376, year = {2020}, author = {Khan, A and Nawaz, U and Ulhaq, A and Robinson, RW}, title = {Real-time plant health assessment via implementing cloud-based scalable transfer learning on AWS DeepLens.}, journal = {PloS one}, volume = {15}, number = {12}, pages = {e0243243}, pmid = {33332376}, issn = {1932-6203}, mesh = {Algorithms ; *Cloud Computing ; Image Processing, Computer-Assisted/*methods ; *Machine Learning ; *Plant Diseases/classification ; *Plant Leaves/anatomy & histology ; }, abstract = {The control of plant leaf diseases is crucial, as it affects the quality and production of plant species with an effect on the economy of any country. Automated identification and classification of plant leaf diseases is, therefore, essential for the reduction of economic losses and the conservation of specific species. Various Machine Learning (ML) models have previously been proposed to detect and identify plant leaf disease; however, they lack usability due to hardware sophistication, limited scalability, and inefficiency in realistic use. By implementing automatic detection and classification of leaf diseases in fruit trees (apple, grape, peach and strawberry) and vegetable plants (potato and tomato) through scalable transfer learning on Amazon Web Services (AWS) SageMaker and importing it into AWS DeepLens for real-time functional usability, our proposed DeepLens Classification and Detection Model (DCDM) addresses such limitations. Scalability and ubiquitous access to our approach are provided by cloud integration. Our experiments on an extensive image data set of healthy and unhealthy fruit tree and vegetable plant leaves showed 98.78% accuracy with real-time diagnosis of plant leaf diseases. To train the DCDM deep learning model, we used forty thousand images and then evaluated it on ten thousand images.
It takes an average of 0.349 s to test an image for disease diagnosis and classification using AWS DeepLens, providing the consumer with disease information in less than a second.}, } @article {pmid33329060, year = {2020}, author = {Molina-Molina, A and Ruiz-Malagón, EJ and Carrillo-Pérez, F and Roche-Seruendo, LE and Damas, M and Banos, O and García-Pinillos, F}, title = {Validation of mDurance, A Wearable Surface Electromyography System for Muscle Activity Assessment.}, journal = {Frontiers in physiology}, volume = {11}, number = {}, pages = {606287}, pmid = {33329060}, issn = {1664-042X}, abstract = {The mDurance® system is an innovative digital tool that combines wearable surface electromyography (sEMG), mobile computing and cloud analysis to streamline and automate the assessment of muscle activity. The tool is particularly devised to support clinicians and sport professionals in their daily routines, as an assessment tool in the prevention, monitoring, rehabilitation, and training fields. This study aimed at determining the validity of the mDurance system for measuring muscle activity by comparing sEMG output with a reference sEMG system, the Delsys® system. Fifteen participants were tested during isokinetic knee extensions at three different speeds (60, 180, and 300 deg/s), for two muscles (rectus femoris [RF] and vastus lateralis [VL]) and two different electrode locations (proximal and distal placement). The maximum voluntary isometric contraction was carried out for the normalization of the signal, followed by dynamic isokinetic knee extensions for each speed. The sEMG output for both systems was obtained from the raw sEMG signal following mDurance's processing and filtering. The mean, median, first quartile, third quartile and 90th percentile were calculated from the sEMG amplitude signals for each system. The results show an almost perfect ICC relationship for the VL (ICC > 0.81) and substantial to almost perfect for the RF (ICC > 0.762) for all variables and speeds. The Bland-Altman plots revealed heteroscedasticity of error for mean, quartile 3 and 90th percentile (60 and 300 deg/s) for RF and at mean and 90th percentile for VL (300 deg/s). In conclusion, the results indicate that the mDurance® sEMG system is a valid tool to measure muscle activity during dynamic contractions over a range of speeds. This innovative system provides more time for clinicians (e.g., interpreting patients' pathologies) and sport trainers (e.g., advising athletes), thanks to automatic processing and filtering of the raw sEMG signal and generation of muscle activity reports in real time.}, } @article {pmid33327512, year = {2020}, author = {Filev Maia, R and Ballester Lurbe, C and Agrahari Baniya, A and Hornbuckle, J}, title = {IRRISENS: An IoT Platform Based on Microservices Applied in Commercial-Scale Crops Working in a Multi-Cloud Environment.}, journal = {Sensors (Basel, Switzerland)}, volume = {20}, number = {24}, pages = {}, pmid = {33327512}, issn = {1424-8220}, support = {1920FRP//Australian Government Department of Agriculture/ ; }, abstract = {Research has shown the multitude of applications that Internet of Things (IoT), cloud computing, and forecast technologies present in every sector. In agriculture, one application is the monitoring of factors that influence crop development to assist in making crop management decisions. Research on the application of such technologies in agriculture has been mainly conducted at small experimental sites or under controlled conditions.
This research has provided relevant insights and guidelines for the use of different types of sensors, the application of a multitude of algorithms to forecast relevant parameters, and architectural approaches for IoT platforms. However, research on the implementation of IoT platforms at the commercial scale is needed to identify platform requirements for proper functioning under such conditions. This article evaluates an IoT platform (IRRISENS) based on fully replicable microservices used to sense soil, crop, and atmosphere parameters, interact with third-party cloud services for scheduling irrigation and, potentially, control irrigation automatically. The proposed IoT platform was evaluated during one growing season at four commercial-scale farms on two broadacre irrigated crops with very different water management requirements (rice and cotton). Five main requirements for IoT platforms to be used in agriculture at commercial scale were identified from implementing IRRISENS as an irrigation support tool for rice and cotton production: scalability, flexibility, heterogeneity, robustness to failure, and security. The platform addressed all these requirements. The results showed that the microservice-based approach used is robust against both intermittent and critical failures in the field that could occur at any of the monitored sites. Further, processing or storage overload caused by datalogger malfunctioning or other reasons at one farm did not affect the platform's performance. The platform was able to deal with different types of data heterogeneity. Since there are no shared microservices among farms, the IoT platform proposed here also provides data isolation, maintaining data confidentiality for each user, which is relevant in a commercial farm scenario.}, } @article {pmid33327453, year = {2020}, author = {Suryanto, N and Kang, H and Kim, Y and Yun, Y and Larasati, HT and Kim, H}, title = {A Distributed Black-Box Adversarial Attack Based on Multi-Group Particle Swarm Optimization.}, journal = {Sensors (Basel, Switzerland)}, volume = {20}, number = {24}, pages = {}, pmid = {33327453}, issn = {1424-8220}, support = {2019-0-01343//Ministry of Science and ICT, South Korea/ ; IITP-2020-0-01797//Ministry of Science and ICT, South Korea/ ; }, mesh = {*Algorithms ; *Artificial Intelligence ; Humans ; }, abstract = {Adversarial attack techniques in deep learning have been studied extensively due to their stealthiness to human eyes and potentially dangerous consequences when applied to real-life applications. However, current attack methods in black-box settings mainly employ a large number of queries for crafting their adversarial examples, making them very likely to be detected and countered by the target system (e.g., an artificial intelligence (AI) service provider) due to the high traffic volume. A recent proposal able to address the large-query problem utilizes a gradient-free approach based on the Particle Swarm Optimization (PSO) algorithm. Unfortunately, this original approach tends to have a low attack success rate, possibly due to the model's difficulty in escaping local optima. This obstacle can be overcome by employing a multi-group approach to the PSO algorithm, by which the PSO particles can be redistributed, preventing them from being trapped in local optima. In this paper, we present a black-box adversarial attack which can significantly increase the success rate of PSO-based attacks while maintaining a low number of queries by launching the attack in a distributed manner.
Attacks are executed from multiple nodes, disseminating queries among the nodes, hence reducing the possibility of being recognized by the target system while also increasing scalability. Furthermore, we utilize Multi-Group PSO with Random Redistribution (MGRR-PSO) for perturbation generation, which performs better than the original approach against local optima, thus achieving a higher success rate. Additionally, we propose to efficiently remove excessive perturbation (i.e., perturbation pruning) by again utilizing MGRR-PSO rather than the standard iterative method used in the original approach. We perform five different experiments: comparing our attack's performance with existing algorithms, testing in high-dimensional space on the ImageNet dataset, examining our hyperparameters (i.e., particle size, number of clients, search boundary), and testing a real digital attack on Google Cloud Vision. Our attack achieves a 100% success rate on the MNIST and CIFAR-10 datasets and successfully fools Google Cloud Vision as proof of a real digital attack, while maintaining a low query count and wide applicability.}, } @article {pmid33311728, year = {2020}, author = {Karim, HMR}, title = {Cloud computing-based remote pre-anaesthetic check-up: An adapted approach during corona pandemic.}, journal = {Indian journal of anaesthesia}, volume = {64}, number = {Suppl 4}, pages = {S248-S249}, pmid = {33311728}, issn = {0019-5049}, } @article {pmid33297921, year = {2021}, author = {Singh, NK and Kumar, N and Singh, AK}, title = {Physiology to Disease Transmission of Respiratory Tract Infection: A Narrative Review.}, journal = {Infectious disorders drug targets}, volume = {21}, number = {6}, pages = {e170721188930}, doi = {10.2174/1871526520666201209145908}, pmid = {33297921}, issn = {2212-3989}, mesh = {Air Microbiology ; *COVID-19 ; Humans ; *Respiratory Tract Infections ; SARS-CoV-2 ; Sneezing ; }, abstract = {INTRODUCTION: In the current scenario of the COVID-19 pandemic, the protective reflexes, namely sneeze and cough, have received great attention. However, this is not in terms of protection but in terms of the spread of infection. The present review tries to bring out the correlation between the physiology of sneeze and cough, taking into consideration the various receptors that initiate the two reflexes, and then correlates this with the formation of expelled droplets and the significance of various aspects of droplets that lead to the spread of infection.
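For the MGRR-PSO entry above, a toy sketch of multi-group PSO with periodic random redistribution on a stand-in fitness function; group sizes, coefficients, and the redistribution schedule are invented for illustration.

```python
# Multi-group PSO: several swarms search in parallel; stagnant groups are
# periodically re-seeded at random so particles escape local optima.
import numpy as np

rng = np.random.default_rng(4)
groups, per, dim = 4, 15, 10

def fitness(x):                          # stand-in for an adversarial loss
    return np.sum(x * x, axis=-1)

pos = rng.uniform(-5.0, 5.0, (groups, per, dim))
vel = np.zeros_like(pos)
pbest = pos.copy()

for step in range(200):
    better = fitness(pos) < fitness(pbest)
    pbest[better] = pos[better]
    flat = pbest.reshape(-1, dim)
    gbest = flat[np.argmin(fitness(flat))]           # global best particle
    r1, r2 = rng.random(pos.shape), rng.random(pos.shape)
    vel = 0.7 * vel + 1.5 * r1 * (pbest - pos) + 1.5 * r2 * (gbest - pos)
    pos = pos + vel
    if step % 50 == 49:                              # random redistribution
        worst = np.argmax(fitness(pos).min(axis=1))  # most stagnant group
        pos[worst] = rng.uniform(-5.0, 5.0, (per, dim))

print(float(fitness(gbest)))
```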

MATERIAL AND METHODS: For the compilation of the present review, we searched the terms "Physiology of cough", "Physiology of sneeze", "droplets", "aerosols" and "Aerosols in COVID 19". The above-mentioned terms were extensively searched on PubMed, Google Scholar, and the Google search engine. After reviewing the available material, the most significant research was considered for this review.

CONCLUSION: Through this review, we conclude that there are various factors responsible for the initiation of sneeze and cough, but in the case of infection, it is mainly the inflammatory reaction that directly stimulates the receptors to produce the reflex outburst of air. As the flow of air during expiration is turbulent, it causes damage to the epithelial lining fluid present in the respiratory conduit. In addition, it gets admixed with the saliva in the oropharynx and oral cavity and mucus in the nose to form droplets of various sizes. Large droplets settle nearby and are responsible for droplet and fomite transmission, but the smaller droplets remain suspended in the air and travel farther to cause airborne transmission. The droplet cloud from a sneeze may spread to 6 m or more, compared with a cough; hence the concept of 1 m to 2 m of social distancing is not reliable if the patient is sneezing.}, } @article {pmid33297386, year = {2020}, author = {Sheng, J and Liu, C and Chen, L and Wang, B and Zhang, J}, title = {Research on Community Detection in Complex Networks Based on Internode Attraction.}, journal = {Entropy (Basel, Switzerland)}, volume = {22}, number = {12}, pages = {}, pmid = {33297386}, issn = {1099-4300}, support = {No.2018YFB1003602//National Key Research and Development Program of China/ ; }, abstract = {With the rapid development of computer technology, research on complex networks has attracted more and more attention. At present, highly active research directions such as cloud computing, big data, the internet of vehicles, and distributed systems are all based on complex networks. Community structure detection is a very important and meaningful research hotspot in complex networks, and it is a difficult task to divide the community structure quickly and accurately on large-scale networks. In this paper, we put forward a new community detection approach based on internode attraction, named IACD. This algorithm starts from the perspective of the important nodes of the complex network and refers to the gravitational relationship between two objects in physics to represent the forces between nodes in the network dataset, and then performs community detection. Through experiments on a large number of real-world datasets and synthetic networks, it is shown that the IACD algorithm can quickly and accurately divide the community structure, and it is superior to some classic algorithms and recently proposed algorithms.}, } @article {pmid33292419, year = {2020}, author = {Abbasi, WA and Yaseen, A and Hassan, FU and Andleeb, S and Minhas, FUAA}, title = {ISLAND: in-silico proteins binding affinity prediction using sequence information.}, journal = {BioData mining}, volume = {13}, number = {1}, pages = {20}, pmid = {33292419}, issn = {1756-0381}, support = {Open Access Publishing Support//University of Warwick/ ; 213-58990-2PS2-046//Higher Education Commission, Pakistan/ ; NRPU 6085//Higher Education Commission, Pakistan/ ; }, abstract = {BACKGROUND: Determining binding affinity in protein-protein interactions is important in the discovery and design of novel therapeutics and mutagenesis studies. Determination of the binding affinity of proteins in the formation of protein complexes requires sophisticated, expensive and time-consuming experimentation, which can be replaced with computational methods. Most computational prediction techniques require protein structures, which limits their applicability to protein complexes with known structures.
In this work, we explore sequence-based protein binding affinity prediction using machine learning.

METHOD: We used protein sequence information instead of protein structures, along with machine learning techniques, to accurately predict protein binding affinity.
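A minimal sketch of the kind of sequence-only pipeline this METHOD describes, assuming dipeptide-composition features and a support vector regressor from scikit-learn (toy data and hypothetical affinities; not the authors' ISLAND implementation):

```python
# Sequence-only binding affinity regression sketch: dipeptide-composition
# features for each partner, concatenated and fed to an SVR. Toy data only.
from itertools import product
import numpy as np
from sklearn.svm import SVR

AA = "ACDEFGHIKLMNPQRSTVWY"
DIPEPTIDES = ["".join(p) for p in product(AA, repeat=2)]  # 400 features

def dipeptide_composition(seq):
    counts = np.zeros(len(DIPEPTIDES))
    for i in range(len(seq) - 1):
        counts[DIPEPTIDES.index(seq[i:i + 2])] += 1
    return counts / max(len(seq) - 1, 1)

def featurize(pair):
    # Concatenate the two partners' compositions into one feature vector.
    return np.concatenate([dipeptide_composition(s) for s in pair])

pairs = [("MKTAYIAKQR", "GSHMLEDPVA"), ("ACDEFGHIKL", "MNPQRSTVWY")]
affinities = [-9.1, -7.4]  # hypothetical binding free energies (kcal/mol)

X = np.array([featurize(p) for p in pairs])
model = SVR(kernel="rbf").fit(X, affinities)
print(model.predict(X))
```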

RESULTS: We present our findings that the true generalization performance of even the state-of-the-art sequence-only predictor is far from satisfactory and that the development of machine learning methods for binding affinity prediction with improved generalization performance is still an open problem. We also propose a novel sequence-based protein binding affinity predictor called ISLAND, which gives better accuracy than existing methods on the same validation set as well as on an external independent test dataset. A cloud-based webserver implementation of ISLAND and its Python code are available at https://sites.google.com/view/wajidarshad/software.

CONCLUSION: This paper highlights the fact that the true generalization performance of even the state-of-the-art sequence-only predictor of binding affinity is far from satisfactory and that the development of effective and practical methods in this domain is still an open problem.}, } @article {pmid33291634, year = {2020}, author = {Balaniuk, R and Isupova, O and Reece, S}, title = {Mining and Tailings Dam Detection in Satellite Imagery Using Deep Learning.}, journal = {Sensors (Basel, Switzerland)}, volume = {20}, number = {23}, pages = {}, pmid = {33291634}, issn = {1424-8220}, support = {000//Fundação de Apoio à Pesquisa do Distrito Federal - Brazil/ ; }, abstract = {This work explores the combination of free cloud computing, free open-source software, and deep learning methods to analyze a real, large-scale problem: the automatic country-wide identification and classification of surface mines and mining tailings dams in Brazil. Locations of officially registered mines and dams were obtained from the Brazilian government open data resource. Multispectral Sentinel-2 satellite imagery, obtained and processed at the Google Earth Engine platform, was used to train and test deep neural networks using the TensorFlow 2 application programming interface (API) and the Google Colaboratory (Colab) platform. Fully convolutional neural networks were used in an innovative way to search for unregistered ore mines and tailings dams in large areas of the Brazilian territory. The efficacy of the approach is demonstrated by the discovery of 263 mines that do not have an official mining concession. This exploratory work highlights the potential of a set of new, freely available technologies for the construction of low-cost data science tools with high social impact. At the same time, it discusses and seeks to suggest practical solutions for the complex and serious problem of illegal mining and the proliferation of tailings dams, which pose high risks to the population and the environment, especially in developing countries.}, } @article {pmid33291483, year = {2020}, author = {Zhang, S and Wen, Q and Li, W and Zhang, H and Jin, Z}, title = {A Multi-User Public Key Encryption with Multi-Keyword Search out of Bilinear Pairings.}, journal = {Sensors (Basel, Switzerland)}, volume = {20}, number = {23}, pages = {}, pmid = {33291483}, issn = {1424-8220}, support = {61502044//National Natural Science Foundation of China/ ; }, abstract = {The Internet of Things (IoT) and cloud computing are adopted widely in daily life and industrial production. Sensors of IoT equipment gather personal, sensitive, and important data, which is stored in a cloud server. The cloud helps users save costs and collaborate. However, the privacy of data is also at risk. Public-key encryption with keyword search (PEKS) is convenient for users to use the data without leaking privacy. In this article, we present a multi-user PEKS scheme that realizes multi-keyword search in a single query and extend it to provide ranking based on keyword matches. The receiver can complete the search independently. With a private cloud and a server cloud, most of the users' computation can be outsourced. Moreover, the PEKS can be transferred to a multi-user model in which the private cloud is used to manage receivers and handle outsourcing. Neither the storage cloud nor the private cloud learns anything about the keyword information. IoT devices can therefore easily run these protocols.
Since we do not use any pairing operations, the scheme rests on more general assumptions, which means the devices do not need to take on the heavy task of computing pairings.}, } @article {pmid33287155, year = {2020}, author = {Chen, Y and Yang, T and Li, C and Zhang, Y}, title = {A Binarized Segmented ResNet Based on Edge Computing for Re-Identification.}, journal = {Sensors (Basel, Switzerland)}, volume = {20}, number = {23}, pages = {}, pmid = {33287155}, issn = {1424-8220}, support = {61802001//National Natural Science Foundation of China/ ; }, abstract = {With the advent of the Internet of Everything, more and more devices are connected to the Internet every year. In major cities, in order to maintain normal social order, the demand for deployed cameras is also increasing. In terms of public safety, person Re-Identification (ReID) can play a big role. However, current ReID methods transfer the collected pedestrian images to the cloud for processing, which brings huge communication costs. To solve this problem, we leverage the recently emerging edge computing paradigm, using the edge to bridge the end devices and the cloud, to implement our proposed binarized segmented ResNet. Our method divides a complete ResNet into three parts, corresponding to the end devices, the edge, and the cloud. After joint training, each segmented sub-network is deployed to its corresponding side, and inference is performed to realize ReID. In our experiments, we compared our approach with traditional ReID methods in terms of accuracy and communication overhead. Our method greatly reduces the communication cost while leaving ReID recognition accuracy essentially unchanged. In general, the communication cost can be reduced by four to eight times.}, } @article {pmid33286195, year = {2020}, author = {Zhou, Y and Li, N and Tian, Y and An, D and Wang, L}, title = {Public Key Encryption with Keyword Search in Cloud: A Survey.}, journal = {Entropy (Basel, Switzerland)}, volume = {22}, number = {4}, pages = {}, pmid = {33286195}, issn = {1099-4300}, support = {2018CXGC0701//Shandong Provincial Key Research and Development Program of China/ ; No. 61972050//National Natural Science Foundation of China (NSFC)/ ; }, abstract = {With the popularization of cloud computing, many businesses and individuals prefer to outsource their data to the cloud in encrypted form to protect data confidentiality. However, how to search over encrypted data becomes a concern for users. To address this issue, searchable encryption is a novel cryptographic primitive that enables users to run search queries over encrypted data stored on an untrusted server while guaranteeing the privacy of the data. Public key encryption with keyword search (PEKS) has received a lot of attention as an important branch. In this paper, we focus on the development of PEKS in the cloud by providing a comprehensive research survey. From a technological viewpoint, the existing PEKS schemes can be classified into several variants: PEKS based on public key infrastructure, PEKS based on identity-based encryption, PEKS based on attribute-based encryption, PEKS based on predicate encryption, PEKS based on certificateless encryption, and PEKS supporting proxy re-encryption.
Moreover, we propose some potential applications and valuable future research directions in PEKS.}, } @article {pmid33270670, year = {2020}, author = {Salama AbdELminaam, D and Almansori, AM and Taha, M and Badr, E}, title = {A deep facial recognition system using computational intelligent algorithms.}, journal = {PloS one}, volume = {15}, number = {12}, pages = {e0242269}, pmid = {33270670}, issn = {1932-6203}, mesh = {Algorithms ; *Artificial Intelligence ; Deep Learning ; *Facial Recognition ; Humans ; *Machine Learning ; Neural Networks, Computer ; *Support Vector Machine ; }, abstract = {The development of biometric applications, such as facial recognition (FR), has recently become important in smart cities. Many scientists and engineers around the world have focused on establishing increasingly robust and accurate algorithms and methods for these types of systems and their applications in everyday life. FR is a developing technology with multiple real-time applications. The goal of this paper is to develop a complete FR system using transfer learning in fog computing and cloud computing. The developed system uses deep convolutional neural networks (DCNNs) because of their dominant representational power; conditions such as occlusion, expression, illumination, and pose can affect deep FR performance. The DCNN is used to extract relevant facial features, which allow faces to be compared efficiently. The system can be trained to recognize a set of people and to learn via an online method, by integrating the new people it processes and improving its predictions on the ones it already has. The proposed recognition method was tested with three standard machine learning algorithms (Decision Tree (DT), K-Nearest Neighbor (KNN), and Support Vector Machine (SVM)). The proposed system has been evaluated using three datasets of face images (SDUMLA-HMT, 113, and CASIA) via the performance metrics of accuracy, precision, sensitivity, specificity, and time. The experimental results show that the proposed method outperforms the other algorithms on all metrics. The suggested algorithm results in higher accuracy (99.06%), higher precision (99.12%), higher recall (99.07%), and higher specificity (99.10%) than the comparison algorithms.}, } @article {pmid33268451, year = {2020}, author = {Aigouy, B and Cortes, C and Liu, S and Prud'Homme, B}, title = {EPySeg: a coding-free solution for automated segmentation of epithelia using deep learning.}, journal = {Development (Cambridge, England)}, volume = {147}, number = {24}, pages = {}, pmid = {33268451}, issn = {1477-9129}, mesh = {Computational Biology ; Deep Learning ; Epithelium/*growth & development ; Humans ; Image Processing, Computer-Assisted ; Morphogenesis/*genetics ; *Software ; }, abstract = {Epithelia are dynamic tissues that self-remodel during their development. During morphogenesis, the tissue-scale organization of epithelia is obtained through a sum of individual contributions of the cells constituting the tissue. Therefore, understanding any morphogenetic event first requires a thorough segmentation of its constituent cells. This task, however, usually involves extensive manual correction, even with semi-automated tools. Here, we present EPySeg, an open-source, coding-free software that uses deep learning to segment membrane-stained epithelial tissues automatically and very efficiently.
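A generic sketch of the transfer-learning pattern described in the facial recognition abstract above: a pretrained DCNN serves as a feature extractor and a classical classifier is trained on the embeddings. The MobileNetV2 backbone, input size, and toy data are assumptions, not the paper's exact architecture:

```python
# Transfer-learning sketch: pretrained DCNN embeddings + SVM classifier.
# Backbone choice and shapes are illustrative assumptions.
import numpy as np
import tensorflow as tf
from sklearn.svm import SVC

backbone = tf.keras.applications.MobileNetV2(
    include_top=False, pooling="avg", input_shape=(160, 160, 3), weights="imagenet")

def embed(images):
    # images: float array in [0, 255], shape (n, 160, 160, 3)
    x = tf.keras.applications.mobilenet_v2.preprocess_input(images)
    return backbone.predict(x, verbose=0)

# Toy stand-ins for face crops and identity labels.
faces = np.random.rand(8, 160, 160, 3).astype("float32") * 255
labels = np.array([0, 0, 1, 1, 2, 2, 3, 3])

clf = SVC(kernel="linear").fit(embed(faces), labels)
print(clf.predict(embed(faces[:2])))
```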
EPySeg, which comes with a straightforward graphical user interface, can be used as a Python package on a local computer, or on the cloud via Google Colab for users not equipped with deep-learning compatible hardware. By substantially reducing human input in image segmentation, EPySeg accelerates and improves the characterization of epithelial tissues for all developmental biologists.}, } @article {pmid33266523, year = {2020}, author = {Cai, Y and Tang, C and Xu, Q}, title = {Two-Party Privacy-Preserving Set Intersection with FHE.}, journal = {Entropy (Basel, Switzerland)}, volume = {22}, number = {12}, pages = {}, pmid = {33266523}, issn = {1099-4300}, support = {61772147//Foundation of National Natural Science of China under Grant/ ; 2015A030308016//Guangdong Province Natural Science Foundation of major basic research and Cultivation project under Grant/ ; 2015KCXTD014//Project of Ordinary University Innovation Team Construction of Guangdong Province under Grant/ ; 2014KZDXM044//Basic Research Major Projects of Department of education of Guangdong Province under Grant/ ; 1201610005//Collaborative Innovation Major Projects of Bureau of Education of Guangzhou City under Grant/ ; 2019B020215004//Key-Area Research and Development Plan of Guangdong province under Grant/ ; }, abstract = {A two-party private set intersection allows two parties, the client and the server, to compute an intersection over their private sets, without revealing any information beyond the intersecting elements. We present a novel private set intersection protocol based on Shuhong Gao's fully homomorphic encryption scheme and prove the security of the protocol in the semi-honest model. We also present a variant of the protocol, a completely novel construction for computing the intersection based on Bloom filters and fully homomorphic encryption, whose complexity is independent of the client's set size. The security of the protocols relies on the learning with errors and ring learning with errors problems. Furthermore, in the cloud with malicious adversaries, the computation of the private set intersection can be outsourced to the cloud service provider without revealing any private information.}, } @article {pmid33266243, year = {2020}, author = {Froiz-Míguez, I and Lopez-Iturri, P and Fraga-Lamas, P and Celaya-Echarri, M and Blanco-Novoa, Ó and Azpilicueta, L and Falcone, F and Fernández-Caramés, TM}, title = {Design, Implementation, and Empirical Validation of an IoT Smart Irrigation System for Fog Computing Applications Based on LoRa and LoRaWAN Sensor Nodes.}, journal = {Sensors (Basel, Switzerland)}, volume = {20}, number = {23}, pages = {}, pmid = {33266243}, issn = {1424-8220}, support = {ED431C 2020/15, ED431G490 2019/01//Xunta de Galicia/ ; RTI2018-095499-B-C31, TEC2016-75067-C4-1-R, RED2018-102668-T and PID2019-104958RB-C42//Agencia Estatal de Investigación of Spain and ERDF funds of the EU (FEDER Galicia 2014-2020 & AEI/FEDER Programs, UE)/ ; }, abstract = {Climate change is driving new solutions to manage water more efficiently. Such solutions involve the development of smart irrigation systems where Internet of Things (IoT) nodes are deployed throughout large areas. In addition, in the mentioned areas, wireless communications can be difficult due to the presence of obstacles and metallic objects that block electromagnetic wave propagation totally or partially.
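A plaintext toy of the Bloom-filter intersection idea from the private set intersection abstract above; in the actual protocol the filter queries are evaluated under fully homomorphic encryption so that neither party learns the other's set, and the filter parameters below are illustrative assumptions:

```python
# Plaintext Bloom-filter membership toy. It illustrates why the client's cost
# can be independent of the server's set size; the real protocol evaluates
# these lookups under fully homomorphic encryption.
import hashlib

M, K = 256, 3  # filter bits and number of hash functions (sketch parameters)

def positions(item):
    return [int(hashlib.sha256(f"{i}:{item}".encode()).hexdigest(), 16) % M
            for i in range(K)]

def build_filter(items):
    bits = [0] * M
    for it in items:
        for p in positions(it):
            bits[p] = 1
    return bits

server_set = {"alice", "bob", "carol", "dave"}
client_set = {"bob", "erin"}

bf = build_filter(server_set)
intersection = {x for x in client_set if all(bf[p] for p in positions(x))}
print(intersection)  # {'bob'} (with a small false-positive probability)
```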
This article details the development of a smart irrigation system able to cover large urban areas thanks to the use of Low-Power Wide-Area Network (LPWAN) sensor nodes based on LoRa and LoRaWAN. IoT nodes collect soil temperature/moisture and air temperature data, and control the water supply autonomously, either by making use of fog computing gateways or by relying on remote commands sent from a cloud. Since the selection of IoT node and gateway locations is essential for good connectivity and reduced energy consumption, this article uses an in-house 3D-ray launching radio-planning tool to determine the best locations in real scenarios. Specifically, this paper provides details on the modeling of a university campus, which includes elements like buildings, roads, green areas, and vehicles. In such a scenario, simulations and empirical measurements were performed for two different testbeds: a LoRaWAN testbed that operates at 868 MHz and a testbed based on LoRa with 433 MHz transceivers. All the measurements agree with the simulation results, showing the impact of shadowing effects and material features (e.g., permittivity, conductivity) on the electromagnetic propagation of near-ground and underground LoRaWAN communications. Higher RF power levels are observed for 433 MHz due to the higher transmitted power level and the lower radio propagation losses, and even in the worst gateway location, the received power level is higher than the sensitivity threshold (-148 dBm). Regarding water consumption, the provided estimations indicate that the proposed smart irrigation system is able to reduce water use by roughly 23% simply by taking weather forecasts into account. The obtained results provide useful guidelines for future smart irrigation developers and show the radio-planning tool's accuracy, which allows for optimizing the sensor network topology and the overall performance of the network in terms of coverage, cost, and energy consumption.}, } @article {pmid33260321, year = {2020}, author = {Xu, S and Guo, C}, title = {Computation Offloading in a Cognitive Vehicular Networks with Vehicular Cloud Computing and Remote Cloud Computing.}, journal = {Sensors (Basel, Switzerland)}, volume = {20}, number = {23}, pages = {}, pmid = {33260321}, issn = {1424-8220}, support = {4202049//Beijing Natural Science Foundation/ ; 2018YFB1800805//National Key R\&D Program of China/ ; }, abstract = {To satisfy the explosive growth of computation-intensive vehicular applications, we investigated the computation offloading problem in a cognitive vehicular network (CVN). Specifically, in our scheme, vehicular cloud computing (VCC)- and remote cloud computing (RCC)-enabled computation offloading were jointly considered. So far, extensive research has been conducted on RCC-based computation offloading, while studies on VCC-based computation offloading are relatively rare. In fact, due to the dynamics and uncertainty of on-board resources, VCC-based computation offloading is more challenging than the RCC one, especially in vehicular scenarios with expensive inter-vehicle communication or poor communication environments. To solve this problem, we propose to leverage the VCC's computation resources for offloading in a perception-exploitation manner, which mainly comprises two stages: resource discovery and computation offloading.
In the resource discovery stage, based on the action-observation history, a Long Short-Term Memory (LSTM) model is proposed to predict the on-board resource utilization status at the next time slot. Thereafter, based on the obtained computation resource distribution, a decentralized multi-agent Deep Reinforcement Learning (DRL) algorithm is proposed to solve the collaborative computation offloading with VCC and RCC. Last but not least, the proposed algorithms' effectiveness is verified with a host of numerical simulation results from different perspectives.}, } @article {pmid33256006, year = {2020}, author = {Bandyopadhyay, A and Kumar Singh, V and Mukhopadhyay, S and Rai, U and Xhafa, F and Krause, P}, title = {Matching IoT Devices to the Fog Service Providers: A Mechanism Design Perspective.}, journal = {Sensors (Basel, Switzerland)}, volume = {20}, number = {23}, pages = {}, pmid = {33256006}, issn = {1424-8220}, support = {PhD-MLA/4(29)/2014-15//Visvesvaraya National Institute of Technology/ ; PRX19/00155//Spanish Ministry of Science, 610 Innovation and Universities/ ; PID2019-111100RB-C21/AEI/ 10.13039/501100011033//Ministerio de Ciencia e Innovación, Spain/ ; }, abstract = {In the Internet of Things (IoT) + Fog + Cloud architecture, with the unprecedented growth of IoT devices, one of the challenging issues that needs to be tackled is how to allocate Fog service providers (FSPs) to IoT devices, especially in a game-theoretic environment. Here, the allocation of FSPs to IoT devices is examined through a game-theoretic lens so that utility-maximizing agents may behave benignly. In this scenario, we have multiple IoT devices and multiple FSPs, and the IoT devices give a preference ordering over a subset of FSPs. Given such a scenario, the goal is to allocate at most one FSP to each of the IoT devices. We propose mechanisms based on the theory of mechanism design without money to allocate FSPs to the IoT devices. The proposed mechanisms have been designed in a flexible manner to address both long- and short-duration access of the FSPs by the IoT devices. Analytically, we prove economic robustness, and probabilistic analyses are carried out for the allocation of IoT devices to the FSPs. In simulation, mechanism efficiency is laid out under different scenarios with an implementation in Python.}, } @article {pmid33255294, year = {2020}, author = {Díaz-de-Arcaya, J and Miñón, R and Torre-Bastida, AI and Del Ser, J and Almeida, A}, title = {PADL: A Modeling and Deployment Language for Advanced Analytical Services.}, journal = {Sensors (Basel, Switzerland)}, volume = {20}, number = {23}, pages = {}, pmid = {33255294}, issn = {1424-8220}, support = {KK-2020/00049//SPRI-Basque Government ELKARTEK 3KIA/ ; IT1294-19//Consolidated Research Group MATHMODE/ ; RTI2018-101045-A-C22//Ministerio de Ciencia y Tecnología/ ; }, abstract = {In the smart city context, Big Data analytics plays an important role in processing the data collected through IoT devices. The analysis of the information gathered by sensors favors the generation of specific services and systems that not only improve the quality of life of the citizens, but also optimize the city resources. However, the difficulties of implementing this entire process in real scenarios are manifold, including the huge amount and heterogeneity of the devices, their geographical distribution, and the complexity of the necessary IT infrastructures.
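A minimal sketch of the resource-discovery step described in the offloading abstract above: an LSTM predicting next-slot resource utilization from a short observation window. The window length, layer sizes, and synthetic utilization series are assumptions, not the paper's configuration:

```python
# LSTM forecasting sketch: predict the next-slot utilization from the last
# WINDOW observations. Synthetic data stands in for on-board measurements.
import numpy as np
import tensorflow as tf

WINDOW = 10
series = np.sin(np.linspace(0, 20, 500)) * 0.5 + 0.5  # synthetic utilization in [0, 1]

X = np.array([series[i:i + WINDOW] for i in range(len(series) - WINDOW)])[..., None]
y = series[WINDOW:]

model = tf.keras.Sequential([
    tf.keras.layers.LSTM(32, input_shape=(WINDOW, 1)),
    tf.keras.layers.Dense(1, activation="sigmoid"),  # utilization is a fraction
])
model.compile(optimizer="adam", loss="mse")
model.fit(X, y, epochs=5, verbose=0)

print(model.predict(X[-1:], verbose=0))  # predicted utilization at the next slot
```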
For this reason, the main contribution of this paper is the PADL description language, which has been specifically tailored to assist in the definition and operationalization phases of the machine learning life cycle. It provides annotations that serve as an abstraction layer from the underlying infrastructure and technologies, hence facilitating the work of data scientists and engineers. Due to its proficiency in the operationalization of distributed pipelines over edge, fog, and cloud layers, it is particularly useful in the complex and heterogeneous environments of smart cities. For this purpose, PADL contains functionalities for the specification of monitoring, notifications, and actuation capabilities. In addition, we provide tools that facilitate its adoption in production environments. Finally, we showcase the usefulness of the language by showing the definition of PADL-compliant analytical pipelines over two use cases in a smart city context (flood control and waste management), demonstrating that its adoption is simple and beneficial for the definition of information and process flows in such environments.}, } @article {pmid33237919, year = {2020}, author = {Gonzalez Villasanti, H and Justice, LM and Chaparro-Moreno, LJ and Lin, TJ and Purtell, K}, title = {Automatized analysis of children's exposure to child-directed speech in preschool settings: Validation and application.}, journal = {PloS one}, volume = {15}, number = {11}, pages = {e0242511}, pmid = {33237919}, issn = {1932-6203}, mesh = {Adult ; Automated Facial Recognition/*methods ; Child, Preschool/*education ; Cloud Computing ; Facial Expression ; Female ; Humans ; Interpersonal Relations ; Language Development ; Machine Learning ; Peer Group ; Phonetics ; *Speech ; Speech Perception ; *Speech Recognition Software ; *Teaching ; Video Recording ; }, abstract = {The present study explored whether a tool for automatic detection and recognition of interactions and child-directed speech (CDS) in preschool classrooms could be developed, validated, and applied to non-coded video recordings representing children's classroom experiences. Using first-person video recordings collected by 13 preschool children during a morning in their classrooms, we extracted high-level audiovisual features from recordings using automatic speech recognition and computer vision services from a cloud computing provider. Using manual coding for interactions and transcriptions of CDS as reference, we trained and tested supervised classifiers and linear mappings to measure five variables of interest. We show that the supervised classifiers trained with speech activity, proximity, and high-level facial features achieve adequate accuracy in detecting interactions. Furthermore, in combination with an automatic speech recognition service, the supervised classifier achieved error rates for CDS measures that are in line with other open-source automatic decoding tools in early childhood settings. Finally, we demonstrate our tool's applicability by using it to automatically code and transcribe children's interactions and CDS exposure vertically within a classroom day (morning to afternoon) and horizontally over time (fall to winter).
Developing and scaling tools for automatized capture of children's interactions with others in the preschool classroom, as well as exposure to CDS, may revolutionize scientific efforts to identify precise mechanisms that foster young children's language development.}, } @article {pmid33232315, year = {2020}, author = {Haas, T}, title = {Developing political-ecological theory: The need for many-task computing.}, journal = {PloS one}, volume = {15}, number = {11}, pages = {e0226861}, pmid = {33232315}, issn = {1932-6203}, mesh = {*Acinonyx ; Algorithms ; Animals ; Cloud Computing ; Conservation of Natural Resources/legislation & jurisprudence/*methods ; Ecosystem ; Endangered Species ; Models, Theoretical ; Politics ; }, abstract = {Models of political-ecological systems can inform policies for managing ecosystems that contain endangered species. To increase the credibility of these models, massive computation is needed to statistically estimate the model's parameters, compute confidence intervals for these parameters, determine the model's prediction error rate, and assess its sensitivity to parameter misspecification. To meet this statistical and computational challenge, this article delivers statistical algorithms and a method for constructing ecosystem management plans that are coded as distributed computing applications. These applications can run on cluster computers, the cloud, or a collection of in-house workstations. This downloadable code is used to address the challenge of conserving the East African cheetah (Acinonyx jubatus). This demonstration means that the new standard of credibility that any political-ecological model needs to meet is the one given herein.}, } @article {pmid33217896, year = {2020}, author = {Hassan, SR and Ahmad, I and Ahmad, S and Alfaify, A and Shafiq, M}, title = {Remote Pain Monitoring Using Fog Computing for e-Healthcare: An Efficient Architecture.}, journal = {Sensors (Basel, Switzerland)}, volume = {20}, number = {22}, pages = {}, pmid = {33217896}, issn = {1424-8220}, support = {RSP-2020/256//King Saud University, Riyadh, Saudi Arabia/ ; }, mesh = {Cloud Computing ; Delivery of Health Care ; Electrocardiography ; Electromyography ; Humans ; *Internet of Things ; Pain/*diagnosis ; *Remote Sensing Technology ; *Telemedicine ; Wireless Technology ; }, abstract = {The integration of medical signal processing capabilities and advanced sensors into Internet of Things (IoT) devices plays a key role in providing comfort and convenience to human lives. As the number of patients is increasing gradually, providing healthcare facilities to each patient, particularly to the patients located in remote regions, not only has become challenging but also results in several issues, such as: (i) increase in workload on paramedics, (ii) wastage of time, and (iii) accommodation of patients. Therefore, the design of smart healthcare systems has become an important area of research to overcome these above-mentioned issues. Several healthcare applications have been designed using wireless sensor networks (WSNs), cloud computing, and fog computing. Most of the e-healthcare applications are designed using the cloud computing paradigm. Cloud-based architecture introduces high latency while processing huge amounts of data, thus restricting the large-scale implementation of latency-sensitive e-healthcare applications. 
Fog computing architecture offers processing and storage resources near the edge of the network; thus, designing e-healthcare applications using the fog computing paradigm is of interest to meet the low latency requirement of such applications. Patients who are minors or who are in intensive care units (ICUs) are unable to self-report their pain conditions. Remote healthcare monitoring applications deploy IoT devices with bio-sensors capable of sensing surface electromyogram (sEMG) and electrocardiogram (ECG) signals to monitor the pain condition of such patients. In this article, a fog computing architecture is proposed for deploying a remote pain monitoring system. The key motivation for adopting the fog paradigm in our proposed approach is to reduce latency and network consumption. To validate the effectiveness of the proposed approach in minimizing delay and network utilization, simulations were carried out in iFogSim and the results were compared with cloud-based systems. The results of the simulations carried out in this research indicate that a reduction in both latency and network consumption can be achieved by adopting the proposed approach for implementing a remote pain monitoring system.}, } @article {pmid33216660, year = {2021}, author = {Krishna, R and Elisseev, V}, title = {User-centric genomics infrastructure: trends and technologies.}, journal = {Genome}, volume = {64}, number = {4}, pages = {467-475}, doi = {10.1139/gen-2020-0096}, pmid = {33216660}, issn = {1480-3321}, mesh = {Base Sequence ; Computational Biology/*methods ; Genomics/*methods ; Humans ; Software ; }, abstract = {Genomics is both a data- and compute-intensive discipline. The success of genomics depends on an adequate informatics infrastructure that can address growing data demands and enable a diverse range of resource-intensive computational activities. Designing a suitable infrastructure is a challenging task, and its success largely depends on its adoption by users. In this article, we take a user-centric view of genomics, where users are bioinformaticians, computational biologists, and data scientists. We try to take their point of view on how traditional computational activities for genomics are expanding due to data growth, as well as the introduction of big data and cloud technologies. The changing landscape of computational activities and new user requirements will influence the design of future genomics infrastructures.}, } @article {pmid33211552, year = {2021}, author = {Van Horn, JD}, title = {Bridging the Brain and Data Sciences.}, journal = {Big data}, volume = {9}, number = {3}, pages = {153-187}, pmid = {33211552}, issn = {2167-647X}, support = {R24 MH114796/MH/NIMH NIH HHS/United States ; U24 ES026465/ES/NIEHS NIH HHS/United States ; R44 NS081792/NS/NINDS NIH HHS/United States ; }, mesh = {Brain ; *Cloud Computing ; *Data Science ; Publishing ; }, abstract = {Brain scientists are now capable of collecting more data in a single experiment than researchers a generation ago might have collected over an entire career. Indeed, the brain itself seems to thirst for more and more data. Such digital information not only comprises individual studies but is also increasingly shared and made openly available for secondary, confirmatory, and/or combined analyses. Numerous web resources now exist containing data across spatiotemporal scales. Data processing workflow technologies running via cloud-enabled computing infrastructures allow for large-scale processing.
Such a move toward greater openness is fundamentally changing how brain science results are communicated and linked to available raw data and processed results. Ethical, professional, and motivational issues challenge the wholesale commitment to data-driven neuroscience. Nevertheless, fueled by government investments into primary brain data collection coupled with increased sharing and community pressure challenging the dominant publishing model, large-scale brain and data science is here to stay.}, } @article {pmid33211312, year = {2020}, author = {Giardini, ME and Livingstone, IAT}, title = {Extending the Reach and Task-Shifting Ophthalmology Diagnostics Through Remote Visualisation.}, journal = {Advances in experimental medicine and biology}, volume = {1260}, number = {}, pages = {161-174}, pmid = {33211312}, issn = {0065-2598}, mesh = {Artificial Intelligence ; Child ; *Eye Diseases/diagnostic imaging ; Humans ; Infant, Newborn ; Ophthalmology/*trends ; *Remote Consultation ; Telemedicine/*trends ; }, abstract = {Driven by the global increase in the size and median age of the world population, sight loss is becoming a major public health challenge. Furthermore, the increased survival of premature neonates in low- and middle-income countries is causing an increase in developmental paediatric ophthalmic disease. Finally, there is an ongoing change in health-seeking behaviour worldwide, with consequent demand for increased access to healthcare, including ophthalmology. There is therefore the need to maximise the reach of resource-limited ophthalmology expertise in the context of increasing demand. Yet, ophthalmic diagnostics critically relies on visualisation, through optical imaging, of the front and of the back of the eye, and teleophthalmology, the remote visualisation of diagnostic images, shows promise to offer a viable solution. In this chapter, we first explore the strategies at the core of teleophthalmology and, in particular, real-time vs store-and-forward remote visualisation techniques, including considerations on suitability for different tasks and environments. We then introduce the key technologies suitable for teleophthalmology: anterior segment imaging, posterior segment imaging (retinal imaging) and, briefly, radiographic/tomographic techniques. We highlight enabling factors, such as high-resolution handheld imaging, high data rate mobile transmission, cloud storage and computing, 3D printing and other rapid fabrication technologies, and patient and healthcare system acceptance of remote consultations. We then briefly discuss four canonical implementation settings, namely, national service provision integration, field and community screening, optometric decision support and virtual clinics, giving representative examples. We conclude with considerations on the outlook of the field, in particular, on artificial intelligence and on robotic actuation of the patient end point as a complement to televisualisation.}, } @article {pmid33211025, year = {2020}, author = {Tsai, VF and Zhuang, B and Pong, YH and Hsieh, JT and Chang, HC}, title = {Web- and Artificial Intelligence-Based Image Recognition For Sperm Motility Analysis: Verification Study.}, journal = {JMIR medical informatics}, volume = {8}, number = {11}, pages = {e20031}, pmid = {33211025}, issn = {2291-9694}, abstract = {BACKGROUND: Human sperm quality fluctuates over time. Therefore, it is crucial for couples preparing for natural pregnancy to monitor sperm motility.

OBJECTIVE: This study verified the performance of an artificial intelligence-based image recognition and cloud computing sperm motility testing system (Bemaner, Createcare) composed of microscope and microfluidic modules and designed to adapt to different types of smartphones.

METHODS: Sperm videos were captured and uploaded to the cloud with an app. Analysis of sperm motility was performed by an artificial intelligence-based image recognition algorithm, and the results were then displayed. According to the number of motile sperm in the field of view, 47 (deidentified) videos of sperm were scored using 6 grades (0-5) by a male-fertility expert with 10 years of experience. Pearson product-moment correlation was calculated between the grades and the results (concentration of total sperm, concentration of motile sperm, and motility percentage) computed by the system.
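A sketch of the validation statistic named in this METHODS paragraph, Pearson product-moment correlation between expert grades and system outputs, with made-up numbers for illustration:

```python
# Pearson correlation between expert grades and a system-computed measure.
# The values below are illustrative stand-ins, not the study's data.
from scipy.stats import pearsonr

expert_grades = [0, 1, 1, 2, 3, 3, 4, 5]                    # 6-grade scale (0-5)
motile_concentration = [1.2, 4.8, 5.1, 9.7, 15.3, 14.9, 22.4, 30.8]

r, p = pearsonr(expert_grades, motile_concentration)
print(f"r = {r:.2f}, P = {p:.3g}")
```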

RESULTS: Good correlation was demonstrated between the grades and results computed by the system for concentration of total sperm (r=0.65, P<.001), concentration of motile sperm (r=0.84, P<.001), and motility percentage (r=0.90, P<.001).

CONCLUSIONS: This smartphone-based sperm motility test (Bemaner) accurately measures motility-related parameters and could potentially be applied toward the following fields: male infertility detection, sperm quality test during preparation for pregnancy, and infertility treatment monitoring. With frequent at-home testing, more data can be collected to help make clinical decisions and to conduct epidemiological research.}, } @article {pmid33208108, year = {2020}, author = {Choi, JH and Kim, T and Jung, J and Joo, JWJ}, title = {Fully automated web-based tool for identifying regulatory hotspots.}, journal = {BMC genomics}, volume = {21}, number = {Suppl 10}, pages = {616}, pmid = {33208108}, issn = {1471-2164}, mesh = {Chromosome Mapping ; Internet ; Models, Statistical ; *Quantitative Trait Loci ; *Saccharomyces cerevisiae/genetics ; }, abstract = {BACKGROUND: Regulatory hotspots are genetic variations that may regulate the expression levels of many genes. It has been of great interest to find those hotspots utilizing expression quantitative trait locus (eQTL) analysis. However, it has been reported that many of the findings are spurious hotspots induced by various unknown confounding factors. Recently, methods utilizing complicated statistical models have been developed that successfully identify genuine hotspots. Next-generation Intersample Correlation Emended (NICE) is one of the methods that show high sensitivity and low false-discovery rate in finding regulatory hotspots. Even though the methods successfully find genuine hotspots, they have not been widely used due to their non-user-friendly interfaces and complex running processes. Furthermore, most of the methods are impractical due to their prohibitively high computational complexity.

RESULTS: To overcome the limitations of existing methods, we developed a fully automated web-based tool, referred to as NICER (NICE Renew), which is based on the NICE program. First, we dramatically reduced the running and installation burden of NICE. Second, we significantly reduced running time by incorporating multi-processing. Third, besides our web-based NICER, users can use NICER on Google Compute Engine and can readily install and run the NICER web service on their local computers. Finally, we provide different input formats and visualization tools to show results. Utilizing a yeast dataset, we show that NICER can be successfully used in an eQTL analysis to identify many genuine regulatory hotspots, more than half of which were previously reported elsewhere.
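An illustrative sketch of the multi-processing speed-up mentioned above: independent per-gene association scans distributed over worker processes. The scan statistic and toy data are stand-ins, not NICER's actual statistical model:

```python
# Parallelizing independent per-gene association tests across processes.
# Toy data and a simple correlation test stand in for the real eQTL model.
from multiprocessing import Pool

import numpy as np
from scipy.stats import pearsonr

rng = np.random.default_rng(0)
expression = rng.normal(size=(200, 50))   # 200 genes x 50 samples (toy)
genotype = rng.integers(0, 2, size=50)    # one candidate locus (toy)

def scan_gene(g):
    """Association p-value of one gene's expression with the locus."""
    return pearsonr(expression[g], genotype)[1]

if __name__ == "__main__":
    with Pool(processes=4) as pool:
        pvalues = pool.map(scan_gene, range(expression.shape[0]))
    print(min(pvalues))
```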

CONCLUSIONS: Even though many hotspot analysis tools have been proposed, they have not been widely used for many practical reasons. NICER is a fully automated web-based solution for eQTL mapping and regulatory hotspot analysis. NICER provides a user-friendly interface and has made hotspot analysis more viable by reducing the running time significantly. We believe that NICER will become the method of choice for increasing the power of eQTL hotspot analysis.}, } @article {pmid33207820, year = {2020}, author = {Sadique, KM and Rahmani, R and Johannesson, P}, title = {IMSC-EIoTD: Identity Management and Secure Communication for Edge IoT Devices.}, journal = {Sensors (Basel, Switzerland)}, volume = {20}, number = {22}, pages = {}, pmid = {33207820}, issn = {1424-8220}, abstract = {The Internet of Things (IoT) will connect several billion devices to the Internet to enhance human society and improve the quality of living. A huge number of sensors, actuators, gateways, servers, and related end-user applications will be connected to the Internet. All these entities require identities to communicate with each other. The communicating devices may be mobile, and currently the main identity solution is IP-based identity management, which is not suitable for the authentication and authorization of heterogeneous IoT devices. Sometimes devices and applications need to communicate in real time to make decisions within very short times. Most of the recently proposed solutions for identity management are cloud-based, and those cloud-based identity management solutions are not feasible for heterogeneous IoT devices. In this paper, we propose an edge-fog based decentralized identity management and authentication solution for IoT devices (IoTD) and edge IoT gateways (EIoTG). We also present a secure communication protocol for communication between edge IoT devices and edge IoT gateways. The proposed security protocols are verified using the Scyther formal verification tool, a popular tool for automated verification of security protocols. The proposed model is specified using the PROMELA language, and the SPIN model checker is used to confirm the specification of the proposed model. The results show different message flows without any error.}, } @article {pmid33207813, year = {2020}, author = {Liu, H and Li, S and Sun, W}, title = {Resource Allocation for Edge Computing without Using Cloud Center in Smart Home Environment: A Pricing Approach.}, journal = {Sensors (Basel, Switzerland)}, volume = {20}, number = {22}, pages = {}, pmid = {33207813}, issn = {1424-8220}, support = {71671159//National Natural Science Foundation of China/ ; 71971188//National Natural Science Foundation of China/ ; G2018203302//Natural Science Foundation of Hebei Province/ ; G2020203005//Natural Science Foundation of Hebei Province/ ; }, abstract = {Recently, smart homes have become an important part of home infrastructure. However, most smart home applications are not interconnected and remain isolated. They use the cloud center as the control platform, which increases the risk of link congestion and endangers data security. Thus, smart homes based on edge computing, without a cloud center, have become an important research area. In this paper, we assume that all applications in a smart home environment are composed of edge nodes and users.
In order to maximize the utility of users, we assume that all users and edge nodes are placed in a market and formulate a pricing resource allocation model with utility maximization. We apply the Lagrangian method to analyze the model, so an edge node (the provider in the market) allocates its resources to a user (the customer in the market) based on resource prices and a utility related to user preferences. To obtain the optimal resource allocation, we propose a pricing-based resource allocation algorithm using a low-pass filtering scheme and confirm, through numerical examples, that the proposed algorithm achieves an optimum within a reasonable number of convergence steps.}, } @article {pmid33205037, year = {2020}, author = {Singh, P and Kaur, R}, title = {An integrated fog and Artificial Intelligence smart health framework to predict and prevent COVID-19.}, journal = {Global transitions}, volume = {2}, number = {}, pages = {283-292}, pmid = {33205037}, issn = {2589-7918}, abstract = {Nowadays, COVID-19 is spreading at a rapid rate in almost all the continents of the world. It has already affected many people, who are further spreading it day by day. Hence, it is essential to alert nearby people due to its communicable nature. As of May 2020, no vaccine was available for the treatment of COVID-19, but existing technologies can be used to minimize its effects. Cloud/fog computing could be used to monitor and control this rapidly spreading infection in a cost-effective and time-saving manner. To strengthen COVID-19 patient prediction, Artificial Intelligence (AI) can be integrated with cloud/fog computing for practical solutions. In this paper, a fog-assisted, Internet of Things-based quality-of-service framework is presented to prevent and protect from COVID-19. It provides real-time processing of users' health data to predict COVID-19 infection by observing their symptoms and immediately generates an emergency alert, medical reports, and significant precautions for the user, their guardian, as well as doctors/experts. It collects sensitive information from the hospitals/quarantine shelters through the patient IoT devices for taking necessary actions/decisions. Further, it generates an alert message to the government health agencies for controlling the outbreak of chronic illness and for taking quick and timely actions.}, } @article {pmid33204404, year = {2020}, author = {Alanazi, SA and Kamruzzaman, MM and Alruwaili, M and Alshammari, N and Alqahtani, SA and Karime, A}, title = {Measuring and Preventing COVID-19 Using the SIR Model and Machine Learning in Smart Health Care.}, journal = {Journal of healthcare engineering}, volume = {2020}, number = {}, pages = {8857346}, pmid = {33204404}, issn = {2040-2309}, mesh = {Algorithms ; Basic Reproduction Number/statistics & numerical data ; Biomedical Engineering ; COVID-19/epidemiology/*prevention & control ; Computer Simulation ; Delivery of Health Care ; Disease Susceptibility/epidemiology ; Female ; Forecasting ; Humans ; *Machine Learning ; Male ; *Models, Biological ; Pandemics/*prevention & control/statistics & numerical data ; Physical Distancing ; Quarantine ; SARS-CoV-2 ; Saudi Arabia/epidemiology ; Stochastic Processes ; }, abstract = {COVID-19 presents an urgent global challenge because of its contagious nature, frequently changing characteristics, and the lack of a vaccine or effective medicines.
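A generic sketch of the pricing mechanism described in the edge-allocation abstract above, assuming logarithmic user utilities, a price that tracks excess demand, and a simple low-pass smoothing step; the utility form, step size, and constants are assumptions, not the paper's exact model:

```python
# Dual-ascent pricing sketch: with u_i(x) = w_i * log(x), each user's optimal
# demand at price p is w_i / p; the price rises when demand exceeds capacity.
# A low-pass filter smooths the price updates, echoing the scheme above.
CAPACITY = 10.0
weights = [1.0, 2.0, 3.0]            # user preference weights w_i
price, step, alpha = 1.0, 0.05, 0.5  # alpha: low-pass smoothing factor

for _ in range(200):
    demand = sum(w / price for w in weights)   # users' utility-maximizing demands
    raw = price + step * (demand - CAPACITY)   # raise price when over-demanded
    price = max(alpha * raw + (1 - alpha) * price, 1e-6)

allocations = [w / price for w in weights]
print(round(price, 3), [round(x, 3) for x in allocations])  # sums to ~CAPACITY
```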
A model for measuring and preventing the continued spread of COVID-19 is urgently required to provide smart health care services. This requires using advanced intelligent computing such as artificial intelligence, machine learning, deep learning, cognitive computing, cloud computing, fog computing, and edge computing. This paper proposes a model for predicting COVID-19 using the SIR model and machine learning for smart health care and the well-being of the citizens of KSA. Knowing the number of susceptible, infected, and recovered cases each day is critical for mathematical modeling to be able to identify the behavioral effects of the pandemic. It forecasts the situation for the upcoming 700 days. The proposed system predicts whether COVID-19 will spread in the population or die out in the long run. Mathematical analysis and simulation results are presented here as a means to forecast the progress of the outbreak and its possible end for three types of scenarios: "no actions," "lockdown," and "new medicines." The effect of interventions like lockdown and new medicines is compared with the "no actions" scenario. The lockdown scenario delays the peak by decreasing infections and affects the area-equality rule of the infected curves. On the other hand, new medicines have a significant impact on the infected curve by decreasing the number of infected people over time. Available forecast data on COVID-19 using simulations predict that the highest level of cases might occur between 15 and 30 November 2020. Simulation data suggest that the virus might be fully under control only after June 2021. The reproductive rate shows that measures such as government lockdowns and isolation of individuals are not enough to stop the pandemic. This study recommends that authorities should, as soon as possible, apply a strict long-term containment strategy to reduce the epidemic size successfully.}, } @article {pmid33200116, year = {2020}, author = {Gorgulla, C and Padmanabha Das, KM and Leigh, KE and Cespugli, M and Fischer, PD and Wang, ZF and Tesseyre, G and Pandita, S and Shnapir, A and Calderaio, A and Gechev, M and Rose, A and Lewis, N and Hutcheson, C and Yaffe, E and Luxenburg, R and Herce, HD and Durmaz, V and Halazonetis, TD and Fackeldey, K and Patten, JJ and Chuprina, A and Dziuba, I and Plekhova, A and Moroz, Y and Radchenko, D and Tarkhanova, O and Yavnyuk, I and Gruber, C and Yust, R and Payne, D and Näär, AM and Namchuk, MN and Davey, RA and Wagner, G and Kinney, J and Arthanari, H}, title = {A Multi-Pronged Approach Targeting SARS-CoV-2 Proteins Using Ultra-Large Virtual Screening.}, journal = {ChemRxiv : the preprint server for chemistry}, volume = {}, number = {}, pages = {}, pmid = {33200116}, issn = {2573-2293}, support = {R01 GM136859/GM/NIGMS NIH HHS/United States ; }, abstract = {Severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), previously known as 2019 novel coronavirus (2019-nCoV), has spread rapidly across the globe, creating an unparalleled global health burden and spurring a deepening economic crisis. As of July 7th, 2020, almost seven months into the outbreak, there are no approved vaccines and few treatments available. Developing drugs that target multiple points in the viral life cycle could serve as a strategy to tackle the current as well as future coronavirus pandemics. Here we leverage the power of our recently developed in silico screening platform, VirtualFlow, to identify inhibitors that target SARS-CoV-2.
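A minimal sketch of the SIR dynamics underlying the model described above, integrated with a simple Euler step; beta, gamma, and the population size are illustrative values, not the paper's fitted parameters for KSA:

```python
# SIR compartment model with a forward-Euler step over the paper's 700-day
# forecast horizon. Parameter values are illustrative assumptions.
N = 1_000_000
beta, gamma = 0.30, 0.10          # transmission and recovery rates (per day)
S, I, R = N - 1.0, 1.0, 0.0

history = []
for day in range(700):
    new_inf = beta * S * I / N    # dS/dt = -beta*S*I/N
    new_rec = gamma * I           # dR/dt = gamma*I
    S, I, R = S - new_inf, I + new_inf - new_rec, R + new_rec
    history.append(I)

peak_day = max(range(700), key=lambda d: history[d])
print(f"R0 = {beta / gamma:.1f}; peak infections around day {peak_day}")
```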
VirtualFlow is able to efficiently harness the power of computing clusters and cloud-based computing platforms to carry out ultra-large-scale virtual screens. In this unprecedented structure-based multi-target virtual screening campaign, we have used VirtualFlow to screen an average of approximately 1 billion molecules against each of 40 different target sites on 17 different potential viral and host targets in the cloud. In addition to targeting the active sites of viral enzymes, we also target critical auxiliary sites such as functionally important protein-protein interaction interfaces. This multi-target approach not only increases the likelihood of finding a potent inhibitor, but could also help identify a collection of anti-coronavirus drugs that would retain efficacy in the face of viral mutation. Drugs belonging to different regimen classes could be combined to develop possible combination therapies, and top hits that bind at highly conserved sites would be potential candidates for further development as coronavirus drugs. Here, we present the top 200 in silico hits for each target site. While in-house experimental validation of some of these compounds is currently underway, we want to make this array of potential inhibitor candidates available to researchers worldwide in consideration of the pressing need for fast-tracked drug development.}, } @article {pmid33194451, year = {2020}, author = {Hesselmann, G}, title = {No conclusive evidence that difficult general knowledge questions cause a "Google Stroop effect". A replication study.}, journal = {PeerJ}, volume = {8}, number = {}, pages = {e10325}, pmid = {33194451}, issn = {2167-8359}, abstract = {Access to the digital "all-knowing cloud" has become an integral part of our daily lives. It has been suggested that the increasing offloading of information and information processing services to the cloud will alter human cognition and metacognition in the short and long term. A much-cited study published in Science in 2011 provided the first behavioral evidence for such changes in human cognition. Participants had to answer difficult trivia questions, and subsequently showed longer response times in a variant of the Stroop task with internet-related words ("Google Stroop effect"). The authors of this study concluded that the concept of the Internet is automatically activated in situations where information is missing (e.g., because we might feel the urge to "google" the information). However, the "Google Stroop effect" could not be replicated in two recent replication attempts as part of a large replicability project. After the failed replication was published in 2018, the first author of the original study pointed out some problems with the design of the failed replication. In our study, we therefore aimed to replicate the "Google Stroop effect" with a research design closer to the original experiment. Our results revealed no conclusive evidence in favor of the notion that the concept of the Internet or internet access (via computers or smartphones) is automatically activated when participants are faced with hard trivia questions.
We provide recommendations for follow-up research.}, } @article {pmid33193602, year = {2020}, author = {Guerra-Assunção, JA and Conde, L and Moghul, I and Webster, AP and Ecker, S and Chervova, O and Chatzipantsiou, C and Prieto, PP and Beck, S and Herrero, J}, title = {GenomeChronicler: The Personal Genome Project UK Genomic Report Generator Pipeline.}, journal = {Frontiers in genetics}, volume = {11}, number = {}, pages = {518644}, pmid = {33193602}, issn = {1664-8021}, abstract = {In recent years, there has been a significant increase in whole genome sequencing data of individual genomes produced by research projects as well as direct to consumer service providers. While many of these sources provide their users with an interpretation of the data, there is a lack of free, open tools for generating reports exploring the data in an easy to understand manner. GenomeChronicler was developed as part of the Personal Genome Project UK (PGP-UK) to address this need. PGP-UK provides genomic, transcriptomic, epigenomic and self-reported phenotypic data under an open-access model with full ethical approval. As a result, the reports generated by GenomeChronicler are intended for research purposes only and include information relating to potentially beneficial and potentially harmful variants, but without clinical curation. GenomeChronicler can be used with data from whole genome or whole exome sequencing, producing a genome report containing information on variant statistics, ancestry and known associated phenotypic traits. Example reports are available from the PGP-UK data page (personalgenomes.org.uk/data). The objective of this method is to leverage existing resources to find known phenotypes associated with the genotypes detected in each sample. The provided trait data is based primarily upon information available in SNPedia, but also collates data from ClinVar, GETevidence, and gnomAD to provide additional details on potential health implications, presence of genotype in other PGP participants and population frequency of each genotype. The analysis can be run in a self-contained environment without requiring internet access, making it a good choice for cases where privacy is essential or desired: any third party project can embed GenomeChronicler within their off-line safe-haven environments. GenomeChronicler can be run for one sample at a time, or in parallel making use of the Nextflow workflow manager. The source code is available from GitHub (https://github.com/PGP-UK/GenomeChronicler), container recipes are available for Docker and Singularity, as well as a pre-built container from SingularityHub (https://singularity-hub.org/collections/3664) enabling easy deployment in a variety of settings. 
Users without access to computational resources to run GenomeChronicler can access the software from the Lifebit CloudOS platform (https://lifebit.ai/cloudos), enabling the production of reports and variant calls from raw sequencing data in a scalable fashion.}, } @article {pmid33192434, year = {2020}, author = {Kirkland, P and Di Caterina, G and Soraghan, J and Matich, G}, title = {Perception Understanding Action: Adding Understanding to the Perception Action Cycle With Spiking Segmentation.}, journal = {Frontiers in neurorobotics}, volume = {14}, number = {}, pages = {568319}, pmid = {33192434}, issn = {1662-5218}, abstract = {Traditionally, the Perception Action cycle is the first stage of building an autonomous robotic system and a practical way to implement a low latency reactive system within a low Size, Weight and Power (SWaP) package. However, within complex scenarios, this method can lack contextual understanding about the scene, such as object recognition-based tracking or system attention. Object detection, identification and tracking, along with semantic segmentation and attention, are all modern computer vision tasks in which Convolutional Neural Networks (CNNs) have shown significant success, although such networks often have a large computational overhead and power requirements, which are not ideal in smaller robotics tasks. Furthermore, cloud computing and massively parallel processing like in Graphic Processing Units (GPUs) are outside the specification of many tasks due to their respective latency and SWaP constraints. In response to this, Spiking Convolutional Neural Networks (SCNNs) look to provide the feature extraction benefits of CNNs, while maintaining low latency and power overhead thanks to their asynchronous spiking event-based processing. A novel Neuromorphic Perception Understanding Action (PUA) system is presented that aims to combine the feature extraction benefits of CNNs with the low latency processing of SCNNs. The PUA utilizes a Neuromorphic Vision Sensor for Perception that facilitates asynchronous processing within a Spiking fully Convolutional Neural Network (SpikeCNN) to provide semantic segmentation and Understanding of the scene. The output is fed to a spiking control system providing Actions. With this approach, the aim is to bring features of deep learning into the lower levels of autonomous robotics, while maintaining a biologically plausible STDP rule throughout the learned encoding part of the network. The network will be shown to provide a more robust and predictable management of spiking activity with an improved thresholding response. The reported experiments show that this system can deliver robust results of over 96% for accuracy and 81% for Intersection over Union, ensuring such a system can be successfully used within object recognition, classification, and tracking problems.
This demonstrates that the attention of the system can be tracked accurately, while the asynchronous processing means the controller can give precise track updates with minimal latency.}, } @article {pmid33187267, year = {2020}, author = {Hamdan, S and Ayyash, M and Almajali, S}, title = {Edge-Computing Architectures for Internet of Things Applications: A Survey.}, journal = {Sensors (Basel, Switzerland)}, volume = {20}, number = {22}, pages = {}, pmid = {33187267}, issn = {1424-8220}, abstract = {The rapid growth of Internet of Things (IoT) applications and their integration into our daily tasks have led to a large number of IoT devices and enormous volumes of IoT-generated data. The resources of IoT devices are limited; therefore, processing and storing IoT data on these devices is inefficient. Traditional cloud-computing resources are used to partially handle some of the IoT resource-limitation issues; however, using the resources in cloud centers leads to other issues, such as latency in time-critical IoT applications. Therefore, edge-cloud-computing technology has recently evolved. This technology allows for data processing and storage at the edge of the network. This paper studies, in depth, edge-computing architectures for IoT (ECAs-IoT) and classifies them according to different factors such as data placement, orchestration services, security, and big data. The paper also studies each architecture in depth and compares them according to various features. Additionally, ECAs-IoT are mapped according to two existing IoT layered models, which helps in identifying the capabilities, features, and gaps of every architecture. Moreover, the paper presents the most important limitations of existing ECAs-IoT and recommends solutions to them. Furthermore, this survey details the IoT applications in the edge-computing domain. Lastly, the paper recommends four different scenarios for using ECAs-IoT by IoT applications.}, } @article {pmid33185051, year = {2020}, author = {LaRochelle, EPM and Pogue, BW}, title = {Theoretical lateral and axial sensitivity limits and choices of molecular reporters for Cherenkov-excited luminescence in tissue during x-ray beam scanning.}, journal = {Journal of biomedical optics}, volume = {25}, number = {11}, pages = {}, pmid = {33185051}, issn = {1560-2281}, support = {P30 CA023108/CA/NCI NIH HHS/United States ; R01 EB024498/EB/NIBIB NIH HHS/United States ; }, mesh = {*Luminescence ; Monte Carlo Method ; Phantoms, Imaging ; *Photons ; X-Rays ; }, abstract = {PURPOSE: Unlike fluorescence imaging utilizing an external excitation source, Cherenkov emissions and Cherenkov-excited luminescence occur within a medium when irradiated with high-energy x-rays. Methods to improve the understanding of the lateral spread and axial depth distribution of these emissions are needed as an initial step to improve the overall system resolution.

METHODS: Monte Carlo simulations were developed to investigate the lateral spread of thin sheets of high-energy sources, and the results were compared with experimental measurements of similar sources in water. Additional simulations of a multilayer skin model were used to investigate the limits of detection using both 6- and 18-MV x-ray sources with fluorescence excitation for inclusion depths up to 1 cm.

RESULTS: Simulations comparing the lateral spread of high-energy sources show approximately 100× higher optical yield from electrons than photons, although electrons showed a larger penumbra in both the simulations and experimental measurements. Cherenkov excitation has a roughly inverse wavelength squared dependence in intensity but is largely redshifted in excitation through any distance of tissue. The calculated emission spectra in tissue were convolved with a database of luminescent compounds to produce a computational ranking of potential Cherenkov-excited luminescence molecular contrast agents.
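The RESULTS above note the roughly inverse-wavelength-squared intensity dependence of Cherenkov excitation. A minimal sketch of that spectral weighting follows; it shows the 1/λ² factor only, ignoring the refractive-index term of the full Frank-Tamm expression and the tissue attenuation that the paper models explicitly.

```python
import numpy as np

# Relative Cherenkov spectral weighting ~ 1/lambda^2, normalized to 400 nm.
# This ignores n(lambda) and tissue transport; it only illustrates why the
# raw emission is blue-weighted while tissue redshifts what escapes.
wavelengths_nm = np.array([400.0, 500.0, 600.0, 700.0, 800.0])
relative_intensity = (400.0 / wavelengths_nm) ** 2
print(dict(zip(wavelengths_nm, relative_intensity.round(3))))
# {400.0: 1.0, 500.0: 0.64, 600.0: 0.444, 700.0: 0.327, 800.0: 0.25}
```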

CONCLUSIONS: Models of thin x-ray and electron sources were compared with experimental measurements, showing similar trends in energy and source type. Surface detection of Cherenkov-excited luminescence appears to be limited by the mean free path of the luminescence emission, where for the given simulation only 2% of the inclusion emissions reached the surface from a depth of 7 mm in a multilayer tissue model.}, } @article {pmid33178415, year = {2020}, author = {Zasada, SJ and Wright, DW and Coveney, PV}, title = {Large-scale binding affinity calculations on commodity compute clouds.}, journal = {Interface focus}, volume = {10}, number = {6}, pages = {20190133}, pmid = {33178415}, issn = {2042-8898}, support = {MR/L016311/1/MRC_/Medical Research Council/United Kingdom ; }, abstract = {In recent years, it has become possible to calculate binding affinities of compounds bound to proteins via rapid, accurate, precise and reproducible free energy calculations. This is imperative in drug discovery as well as personalized medicine. This approach is based on molecular dynamics (MD) simulations and draws on sequence and structural information of the protein and compound concerned. Free energies are determined by ensemble averages of many MD replicas, each of which requires hundreds of cores and/or GPU accelerators, which are now available on commodity cloud computing platforms; there are also requirements for initial model building and subsequent data analysis stages. To automate the process, we have developed a workflow known as the binding affinity calculator. In this paper, we focus on the software infrastructure and interfaces that we have developed to automate the overall workflow and execute it on commodity cloud platforms, in order to reliably predict binding affinities on time scales relevant to the domains of application, and we illustrate its application to two free energy methods.}, } @article {pmid33177037, year = {2020}, author = {Jeon, S and Seo, J and Kim, S and Lee, J and Kim, JH and Sohn, JW and Moon, J and Joo, HJ}, title = {Proposal and Assessment of a De-Identification Strategy to Enhance Anonymity of the Observational Medical Outcomes Partnership Common Data Model (OMOP-CDM) in a Public Cloud-Computing Environment: Anonymization of Medical Data Using Privacy Models.}, journal = {Journal of medical Internet research}, volume = {22}, number = {11}, pages = {e19597}, pmid = {33177037}, issn = {1438-8871}, mesh = {Cloud Computing/*standards ; Confidentiality/*standards ; Data Anonymization/*standards ; Databases, Factual/*standards ; Humans ; Medical Informatics/*methods ; }, abstract = {BACKGROUND: De-identifying personal information is critical when using personal health data for secondary research. The Observational Medical Outcomes Partnership Common Data Model (CDM), defined by the nonprofit organization Observational Health Data Sciences and Informatics, has been gaining attention for its use in the analysis of patient-level clinical data obtained from various medical institutions. When analyzing such data in a public environment such as a cloud-computing system, an appropriate de-identification strategy is required to protect patient privacy.

OBJECTIVE: This study proposes and evaluates a de-identification strategy that comprises several rules along with privacy models such as k-anonymity, l-diversity, and t-closeness. The proposed strategy was evaluated using an actual CDM database.

METHODS: The CDM database used in this study was constructed by the Anam Hospital of Korea University. Analysis and evaluation were performed using the ARX anonymizing framework in combination with the k-anonymity, l-diversity, and t-closeness privacy models.

RESULTS: The CDM database, which was constructed according to the rules established by Observational Health Data Sciences and Informatics, exhibited a low risk of re-identification: The highest re-identifiable record rate (11.3%) in the dataset was exhibited by the DRUG_EXPOSURE table, with a re-identification success rate of 0.03%. However, because all tables include at least one "highest risk" value of 100%, suitable anonymizing techniques are required; moreover, the CDM database preserves the "source values" (raw data), a combination of which could increase the risk of re-identification. Therefore, this study proposes an enhanced strategy to de-identify the source values to significantly reduce not only the highest risk in the k-anonymity, l-diversity, and t-closeness privacy models but also the overall possibility of re-identification.
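The privacy models applied in this entry (pmid33177037) are standard: k-anonymity, for instance, requires every combination of quasi-identifier values to be shared by at least k records. A minimal pandas sketch of that check follows, with invented column names rather than actual OMOP-CDM fields.

```python
import pandas as pd

# Hypothetical illustration of a k-anonymity check; column names are
# invented for the example, not taken from the OMOP-CDM tables.
def k_anonymity(df: pd.DataFrame, quasi_identifiers: list[str]) -> int:
    """Smallest equivalence-class size over the quasi-identifier columns."""
    return int(df.groupby(quasi_identifiers).size().min())

records = pd.DataFrame({
    "age_band":  ["40-49", "40-49", "40-49", "50-59", "50-59"],
    "zip3":      ["021", "021", "021", "100", "100"],
    "diagnosis": ["E11", "E11", "I10", "I10", "E11"],
})
print(k_anonymity(records, ["age_band", "zip3"]))  # -> 2, i.e., 2-anonymous
```

The same groupby machinery extends naturally to l-diversity by counting distinct sensitive values per equivalence class.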

CONCLUSIONS: Our proposed de-identification strategy effectively enhanced the privacy of the CDM database, thereby encouraging clinical research involving multiple centers.}, } @article {pmid33172017, year = {2020}, author = {Cecilia, JM and Cano, JC and Morales-García, J and Llanes, A and Imbernón, B}, title = {Evaluation of Clustering Algorithms on GPU-Based Edge Computing Platforms.}, journal = {Sensors (Basel, Switzerland)}, volume = {20}, number = {21}, pages = {}, pmid = {33172017}, issn = {1424-8220}, support = {RYC2018-025580-I//Ministerio de Ciencia e Innovación/ ; RTI2018-096384-B-I00//Ministerio de Ciencia e Innovación/ ; RTC2019-007159-5//Ministerio de Ciencia e Innovación/ ; 20813/PI/18//Fundación Séneca/ ; }, abstract = {The Internet of Things (IoT) is becoming a new socioeconomic revolution in which data and immediacy are the main ingredients. IoT generates large datasets on a daily basis, but these are currently considered "dark data", i.e., data generated but never analyzed. The efficient analysis of this data is mandatory to create intelligent applications for the next generation of IoT applications that benefit society. Artificial Intelligence (AI) techniques are very well suited to identifying hidden patterns and correlations in this data deluge. In particular, clustering algorithms are of the utmost importance for performing exploratory data analysis to identify a set (a.k.a., cluster) of similar objects. Clustering algorithms are computationally heavy workloads and need to be executed on high-performance computing (HPC) clusters, especially to deal with large datasets. This execution on HPC infrastructures is an energy-hungry procedure with additional issues, such as high-latency communications or privacy. Edge computing is a recently proposed paradigm that enables lightweight computation at the edge of the network to address these issues. In this paper, we provide an in-depth analysis of emergent edge computing architectures that include low-power Graphics Processing Units (GPUs) to speed up these workloads. Our analysis includes performance and power consumption figures of Nvidia's latest AGX Xavier to compare the energy-performance ratio of these low-cost platforms with a high-performance cloud-based counterpart. Three different clustering algorithms (i.e., k-means, Fuzzy Minimals (FM), and Fuzzy C-Means (FCM)) are designed to be optimally executed on edge and cloud platforms, showing a speed-up factor of up to 11× for the GPU code compared to sequential counterparts on the edge platforms and energy savings of up to 150% between the edge computing and HPC platforms.}, } @article {pmid33171714, year = {2020}, author = {Ghazal, M and Basmaji, T and Yaghi, M and Alkhedher, M and Mahmoud, M and El-Baz, AS}, title = {Cloud-Based Monitoring of Thermal Anomalies in Industrial Environments Using AI and the Internet of Robotic Things.}, journal = {Sensors (Basel, Switzerland)}, volume = {20}, number = {21}, pages = {}, pmid = {33171714}, issn = {1424-8220}, abstract = {Recent advancements in cloud computing, artificial intelligence, and the internet of things (IoT) create new opportunities for autonomous monitoring of industrial environments. Nevertheless, detecting anomalies in harsh industrial settings remains challenging. This paper proposes an edge-fog-cloud architecture with mobile IoT edge nodes carried on autonomous robots for thermal anomaly detection in aluminum factories.
We use companion drones as fog nodes to deliver first-response services, and a cloud back-end for thermal anomaly analysis. We also propose a self-driving deep learning architecture and a thermal anomaly detection and visualization algorithm. Our results show that our robot surveyors are low-cost, deliver reduced response times, and detect anomalies more accurately than human surveyors or fixed IoT nodes monitoring the same industrial area. Our self-driving architecture has a root mean square error of 0.19, comparable to VGG-19, with significantly reduced complexity and three times the frame rate, at 60 frames per second. Our thermal-to-visual registration algorithm maximizes mutual information in the image-gradient domain while adapting to different resolutions and camera frame rates.}, } @article {pmid33171646, year = {2020}, author = {Kołakowska, A and Szwoch, W and Szwoch, M}, title = {A Review of Emotion Recognition Methods Based on Data Acquired via Smartphone Sensors.}, journal = {Sensors (Basel, Switzerland)}, volume = {20}, number = {21}, pages = {}, pmid = {33171646}, issn = {1424-8220}, mesh = {*Algorithms ; Bayes Theorem ; *Emotions ; Humans ; *Machine Learning ; *Smartphone ; }, abstract = {In recent years, emotion recognition algorithms have achieved high efficiency, allowing the development of various affective and affect-aware applications. This advancement has taken place mainly in the environment of personal computers offering the appropriate hardware and sufficient power to process complex data from video, audio, and other channels. However, the increase in computing and communication capabilities of smartphones, the variety of their built-in sensors, as well as the availability of cloud computing services have made them an environment in which the task of recognising emotions can be performed at least as effectively. This is possible and particularly important due to the fact that smartphones and other mobile devices have become the main computer devices used by most people. This article provides a systematic overview of publications from the last 10 years related to emotion recognition methods using smartphone sensors. The characteristics of the most important sensors in this respect are presented, along with the methods applied to extract informative features from the data read from these input channels. Then, various machine learning approaches implemented to recognise emotional states are described.}, } @article {pmid33163154, year = {2019}, author = {Wibberg, D and Batut, B and Belmann, P and Blom, J and Glöckner, FO and Grüning, B and Hoffmann, N and Kleinbölting, N and Rahn, R and Rey, M and Scholz, U and Sharan, M and Tauch, A and Trojahn, U and Usadel, B and Kohlbacher, O}, title = {The de.NBI / ELIXIR-DE training platform - Bioinformatics training in Germany and across Europe within ELIXIR.}, journal = {F1000Research}, volume = {8}, number = {}, pages = {}, pmid = {33163154}, issn = {2046-1402}, mesh = {Computational Biology/*education ; Europe ; Germany ; Humans ; }, abstract = {The German Network for Bioinformatics Infrastructure (de.NBI) is a national and academic infrastructure funded by the German Federal Ministry of Education and Research (BMBF). The de.NBI provides (i) service, (ii) training, and (iii) cloud computing to users in life sciences research and biomedicine in Germany and Europe and (iv) fosters the cooperation of the German bioinformatics community with international network structures.
The de.NBI members also run the German node (ELIXIR-DE) within the European ELIXIR infrastructure. The de.NBI / ELIXIR-DE training platform, also known as special interest group 3 (SIG 3) 'Training & Education', coordinates the bioinformatics training of de.NBI and the German ELIXIR node. The network provides a high-quality, coherent, timely, and impactful training program across its eight service centers. Life scientists learn how to handle and analyze biological big data more effectively by applying tools, standards and compute services provided by de.NBI. Since 2015, more than 300 training courses have been carried out with about 6,000 participants, and these courses received recommendation rates of almost 90% (status as of July 2020). In addition to face-to-face training courses, online training was introduced on the de.NBI website in 2016, and guidelines for the preparation of e-learning material were established in 2018. In 2016, ELIXIR-DE joined the ELIXIR training platform. Here, the de.NBI / ELIXIR-DE training platform collaborates with ELIXIR on training activities, advertises training courses via TeSS, and takes part in discussions on the exchange of training-event data essential for quality assessment on both the technical and administrative levels. The de.NBI training program has trained thousands of scientists from Germany and beyond in many different areas of bioinformatics.}, } @article {pmid33163255, year = {2020}, author = {Bremer, E and Saltz, J and Almeida, JS}, title = {ImageBox 2 - Efficient and Rapid Access of Image Tiles from Whole-Slide Images Using Serverless HTTP Range Requests.}, journal = {Journal of pathology informatics}, volume = {11}, number = {}, pages = {29}, pmid = {33163255}, issn = {2229-5089}, abstract = {BACKGROUND: Whole-slide images (WSI) are produced by high-resolution scanning of pathology glass slides. There are a large number of whole-slide imaging scanners, and the resulting images are frequently larger than 100,000 × 100,000 pixels, typically imaging 100,000 to one million cells and ranging from several hundred megabytes to many gigabytes in size.

AIMS AND OBJECTIVES: To provide HTTP access over the web to whole-slide image tiles that have no localized tiling servers, only basic HTTP access, and to move all image decoding and tiling functions to the calling agent (ImageBox).

METHODS: Current software systems require tiling image servers to be installed on systems providing local disk access to these images. ImageBox2 removes this requirement by accessing tiles from a remote HTTP source via byte-level HTTP range requests. This method does not require changing the client software, as the operation is relegated to the ImageBox2 server, which is local (or remote) to the client and can access tiles from remote images that have no server of their own, such as Amazon S3-hosted images. That is, it provides a data service on a server that does not need to be managed: the definition of the serverless execution model increasingly favored by cloud-computing infrastructure.
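As a concrete illustration of the byte-level range requests the METHODS above describe, the sketch below fetches an arbitrary byte window from a remote file over plain HTTP. The URL and offsets are placeholders; in a real WSI workflow the tile offsets would come from parsing the image's index structures client-side, and the remote server (S3 does) must support the standard `Range` header.

```python
import requests

# Minimal sketch of the byte-range access pattern ImageBox2 relies on;
# URL and offsets are invented for illustration.
url = "https://example-bucket.s3.amazonaws.com/slide.svs"
offset, length = 4096, 65536  # hypothetical tile location within the file
resp = requests.get(url, headers={"Range": f"bytes={offset}-{offset + length - 1}"})
assert resp.status_code == 206  # 206 Partial Content: server honored the range
tile_bytes = resp.content      # image decode and tiling then happen client-side
```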

CONCLUSIONS: The specific methodology described and assessed in this report preserves normal client connection semantics by enabling cloud-friendly tiling, promoting a web of HTTP-connected whole-slide images from a wide range of sources, and providing tiling where local tiling servers would otherwise have been unavailable.}, } @article {pmid33162127, year = {2021}, author = {Bergier, I and Papa, M and Silva, R and Santos, PM}, title = {Cloud/edge computing for compliance in the Brazilian livestock supply chain.}, journal = {The Science of the total environment}, volume = {761}, number = {}, pages = {143276}, doi = {10.1016/j.scitotenv.2020.143276}, pmid = {33162127}, issn = {1879-1026}, abstract = {Brazil is an important player in the global agribusiness markets, in which grain and beef make up the majority of exports. Barriers to accessing more valuable sustainable markets emerge from the lack of adequate compliance in supply chains. Here we depict a mobile application based on cloud/edge computing for the livestock supply chain to circumvent that limitation. The application, called BovChain, is a peer-to-peer (P2P) network connecting landowners and slaughterhouses. The objective of the application is twofold. Firstly, it maximizes sustainable business by reducing transaction costs and by strengthening ties between state-authorized stakeholders. Secondly, it creates metadata useful for digital certification by exploiting CMOS and GPS sensor technologies embedded in low-cost smartphones. Successful declarative transactions in the digital space are recorded as metadata, and the corresponding big data might be valuable for the certification of livestock origin and traceability for sustainability compliance in 'glocal' beef markets.}, } @article {pmid33151974, year = {2020}, author = {Hanif, M and Lee, C and Helal, S}, title = {Predictive topology refinements in distributed stream processing system.}, journal = {PloS one}, volume = {15}, number = {11}, pages = {e0240424}, pmid = {33151974}, issn = {1932-6203}, mesh = {Algorithms ; *Big Data ; Cloud Computing/*standards ; Computer Communication Networks/*standards ; *Quality Control ; Workload ; }, abstract = {Cloud computing has evolved big data technologies into a consolidated paradigm with Streaming-Processing-as-a-Service (SPaaS). With a number of enterprises offering cloud-based solutions to end-users and other small enterprises, there has been a boom in the volume of data, creating interest among both industry and academia in big data analytics, streaming applications, and social networking applications. With companies shifting to cloud-based solutions as a service paradigm, competition grows in the market. Good quality of service (QoS) is a must for enterprises as they strive to survive in a competitive environment. However, achieving reasonable QoS goals to meet SLAs cost-effectively is challenging due to variation in workload over time. This problem can be solved if the system has the ability to predict the workload for the near future. In this paper, we present a novel topology-refining scheme based on a workload prediction mechanism. Predictions are made through a model based on a combination of SVR, autoregressive, and moving-average models with a feedback mechanism. Our streaming system is designed to increase overall performance by making the topology refining robust to the incoming workload on the fly, while still being able to achieve the QoS goals of SLA constraints.
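The topology-refining entry above (pmid33151974) forecasts near-future workload with a blend of SVR and autoregressive/moving-average components. A generic sliding-window SVR forecaster in scikit-learn conveys the flavor; this is a sketch on synthetic data, not the paper's combined model or its feedback mechanism.

```python
import numpy as np
from sklearn.svm import SVR

# Toy workload series (requests/sec); use a window of 5 past values to
# predict the next value, one step ahead.
rng = np.random.default_rng(0)
t = np.arange(200)
series = 100 + 30 * np.sin(t / 10.0) + rng.normal(0, 3, t.size)

window = 5
X = np.array([series[i:i + window] for i in range(series.size - window)])
y = series[window:]

model = SVR(kernel="rbf", C=100.0, epsilon=0.5).fit(X[:-20], y[:-20])
pred = model.predict(X[-20:])            # one-step-ahead forecasts
mae = np.mean(np.abs(pred - y[-20:]))
print(f"held-out MAE: {mae:.2f} req/s")
```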
The Apache Flink distributed processing engine is used as a testbed in the paper. The results show that the prediction scheme works well for both workload types, i.e., synthetic as well as real data traces.}, } @article {pmid33150404, year = {2021}, author = {Wang, G and Wignall, J and Kinard, D and Singh, V and Foster, C and Adams, S and Pratt, W and Desai, AD}, title = {An implementation model for managing cloud-based longitudinal care plans for children with medical complexity.}, journal = {Journal of the American Medical Informatics Association : JAMIA}, volume = {28}, number = {1}, pages = {23-32}, pmid = {33150404}, issn = {1527-974X}, support = {K08 HS024299/HS/AHRQ HHS/United States ; }, mesh = {Adult ; Caregivers ; Child ; Chronic Disease/therapy ; *Cloud Computing ; Health Information Exchange ; Health Insurance Portability and Accountability Act ; Health Personnel ; Humans ; *Patient Care Planning/organization & administration ; *Patient Care Team ; Pediatrics ; United States ; }, abstract = {OBJECTIVE: We aimed to iteratively refine an implementation model for managing cloud-based longitudinal care plans (LCPs) for children with medical complexity (CMC).

MATERIALS AND METHODS: We conducted iterative 1-on-1 design sessions with CMC caregivers (ie, parents/legal guardians) and providers between August 2017 and March 2019. During audio-recorded sessions, we asked participants to walk through role-specific scenarios of how they would create, review, and edit an LCP using a cloud-based prototype, which we concurrently developed. Between sessions, we reviewed audio recordings to identify strategies that would mitigate barriers that participants reported relating to 4 processes for managing LCPs: (1) taking ownership, (2) sharing, (3) reviewing, and (4) editing. Analysis informed iterative implementation model revisions.

RESULTS: We conducted 30 design sessions, with 10 caregivers and 20 providers. Participants emphasized that cloud-based LCPs required a team of owners: the caregiver(s), a caregiver-designated clinician, and a care coordinator. Permission settings would need to include universal accessibility for emergency providers, team-level permission options, and some editing restrictions for caregivers. Notifications to review and edit the LCP should be sent to team members before and after clinic visits and after hospital encounters. Mitigating double documentation barriers would require alignment of data fields between the LCP and electronic health record to maximize interoperability.

DISCUSSION: These findings provide a model for how we may leverage emerging Health Insurance Portability and Accountability Act-compliant cloud computing technologies to support families and providers in comanaging health information for CMC.

CONCLUSIONS: Utilizing these management strategies when implementing cloud-based LCPs has the potential to improve team-based care across settings.}, } @article {pmid33150354, year = {2021}, author = {Long, A and Glogowski, A and Meppiel, M and De Vito, L and Engle, E and Harris, M and Ha, G and Schneider, D and Gabrielian, A and Hurt, DE and Rosenthal, A}, title = {The technology behind TB DEPOT: a novel public analytics platform integrating tuberculosis clinical, genomic, and radiological data for visual and statistical exploration.}, journal = {Journal of the American Medical Informatics Association : JAMIA}, volume = {28}, number = {1}, pages = {71-79}, pmid = {33150354}, issn = {1527-974X}, mesh = {Computational Biology ; Databases as Topic ; Genomics ; Humans ; *Internet ; *Medical Informatics Applications ; National Institute of Allergy and Infectious Diseases (U.S.) ; Radiology ; Software ; *Tuberculosis/diagnosis/drug therapy/genetics/prevention & control ; United States ; }, abstract = {OBJECTIVE: Clinical research informatics tools are necessary to support comprehensive studies of infectious diseases. The National Institute of Allergy and Infectious Diseases (NIAID) developed the publicly accessible Tuberculosis Data Exploration Portal (TB DEPOT) to address the complex etiology of tuberculosis (TB).

MATERIALS AND METHODS: TB DEPOT displays deidentified patient case data and facilitates analyses across a wide range of clinical, socioeconomic, genomic, and radiological factors. The solution is built using Amazon Web Services cloud-based infrastructure, .NET Core, Angular, Highcharts, R, PLINK, and other custom-developed services. Structured patient data, pathogen genomic variants, and medical images are integrated into the solution to allow seamless filtering across data domains.

RESULTS: Researchers can use TB DEPOT to query TB patient cases, create and save patient cohorts, and execute comparative statistical analyses on demand. The tool supports user-driven data exploration and fulfills the National Institutes of Health's Findable, Accessible, Interoperable, and Reusable (FAIR) principles.

DISCUSSION: TB DEPOT is the first tool of its kind in the field of TB research to integrate multidimensional data from TB patient cases. Its scalable and flexible architectural design has accommodated growth in the data, organizations, types of data, feature requests, and usage. Favoring client-side technologies over server-side technologies and prioritizing maintenance have been important lessons learned. Future directions are dynamically prioritized, and key functionality is shared through an application programming interface.

CONCLUSION: This paper describes the platform development methodology, resulting functionality, benefits, and technical considerations of a clinical research informatics application to support increased understanding of TB.}, } @article {pmid33150095, year = {2020}, author = {Frontoni, E and Romeo, L and Bernardini, M and Moccia, S and Migliorelli, L and Paolanti, M and Ferri, A and Misericordia, P and Mancini, A and Zingaretti, P}, title = {A Decision Support System for Diabetes Chronic Care Models Based on General Practitioner Engagement and EHR Data Sharing.}, journal = {IEEE journal of translational engineering in health and medicine}, volume = {8}, number = {}, pages = {3000112}, pmid = {33150095}, issn = {2168-2372}, abstract = {Objective Decision support systems (DSS) have been developed and promoted for their potential to improve the quality of health care. However, there is a lack of a common clinical strategy, poor management of clinical resources, and erroneous implementation of preventive medicine. Methods To overcome this problem, this work proposed an integrated system that relies on the creation and sharing of a database extracted from GPs' Electronic Health Records (EHRs) within the Netmedica Italian (NMI) cloud infrastructure. Although the proposed system is a pilot application specifically tailored to improving chronic Type 2 Diabetes (T2D) care, it could easily be targeted to effectively manage different chronic diseases. The proposed DSS is based on the EHR structure used by GPs in their daily activities, following the most up-to-date guidelines in data protection and sharing. The DSS is equipped with a Machine Learning (ML) method for analyzing the shared EHRs and thus tackling the high variability of EHRs. A novel set of T2D care-quality indicators is used specifically to determine the economic incentives, and the T2D features are presented as predictors in the proposed ML approach. Results The EHRs from 41237 T2D patients were analyzed. No additional data collection, with respect to the standard clinical practice, was required. The DSS exhibited competitive performance (up to an overall accuracy of 98%±2% and macro-recall of 96%±1%) for classifying chronic care quality across the different follow-up phases. The chronic care quality model led to a significant increase (up to 12%) in the number of T2D patients without complications. For GPs who agreed to use the proposed system, there was an economic incentive. A further bonus was assigned when performance targets were achieved. Conclusions The care-quality evaluation in a clinical use-case scenario demonstrated how the empowerment of GPs through use of the platform (integrating the proposed DSS), along with the economic incentives, may speed up the improvement of care.}, } @article {pmid33143038, year = {2020}, author = {Chukhno, O and Chukhno, N and Araniti, G and Campolo, C and Iera, A and Molinaro, A}, title = {Optimal Placement of Social Digital Twins in Edge IoT Networks.}, journal = {Sensors (Basel, Switzerland)}, volume = {20}, number = {21}, pages = {}, pmid = {33143038}, issn = {1424-8220}, support = {813278//H2020 Marie Skłodowska-Curie Actions/ ; }, abstract = {In next-generation Internet of Things (IoT) deployments, every object such as a wearable device, a smartphone, a vehicle, and even a sensor or an actuator will be provided with a digital counterpart (twin) with the aim of augmenting the physical object's capabilities and acting on its behalf when interacting with third parties.
Moreover, such objects are able to interact and autonomously establish social relationships according to the Social Internet of Things (SIoT) paradigm. In such a context, the goal of this work is to provide an optimal solution for the social-aware placement of IoT digital twins (DTs) at the network edge, with the twofold aim of reducing the latency (i) between physical devices and corresponding DTs for efficient data exchange, and (ii) among DTs of friend devices to speed up the service discovery and chaining procedures across the SIoT network. To this aim, we formulate the problem as a mixed-integer linear programming model taking into account limited computing resources in the edge cloud and social relationships among IoT devices.}, } @article {pmid33140820, year = {2021}, author = {Wang, YL and Wang, F and Shi, XX and Jia, CY and Wu, FX and Hao, GF and Yang, GF}, title = {Cloud 3D-QSAR: a web tool for the development of quantitative structure-activity relationship models in drug discovery.}, journal = {Briefings in bioinformatics}, volume = {22}, number = {4}, pages = {}, doi = {10.1093/bib/bbaa276}, pmid = {33140820}, issn = {1477-4054}, mesh = {*Drug Design ; *Drug Discovery ; *Internet ; Quantitative Structure-Activity Relationship ; *Software ; }, abstract = {Effective drug discovery contributes to the treatment of numerous diseases but is limited by high costs and long cycles. The Quantitative Structure-Activity Relationship (QSAR) method was introduced to evaluate the activity of a large number of compounds virtually, reducing the time and labor costs required for chemical synthesis and experimental determination. Hence, this method increases the efficiency of drug discovery. To meet the needs of researchers to utilize this technology, numerous QSAR-related web servers, such as Web-4D-QSAR and DPubChem, have been developed in recent years. However, none of the servers mentioned above can perform complete QSAR modeling and supply activity-prediction functions. We introduce Cloud 3D-QSAR, which integrates the functions of molecular structure generation, alignment, molecular interaction field (MIF) computing and results analysis to provide a one-stop solution. We rigorously validated this server; the activity-prediction correlation was R² = 0.934 on 834 test molecules. The sensitivity, specificity and accuracy were 86.9%, 94.5% and 91.5%, respectively, with AUC = 0.981 and AUCPR = 0.971. The Cloud 3D-QSAR server may facilitate the development of good QSAR models in drug discovery. Our server is free and now available at http://chemyang.ccnu.edu.cn/ccb/server/cloud3dQSAR/ and http://agroda.gzu.edu.cn:9999/ccb/server/cloud3dQSAR/.}, } @article {pmid33138072, year = {2020}, author = {Zhao, L}, title = {Privacy-Preserving Distributed Analytics in Fog-Enabled IoT Systems.}, journal = {Sensors (Basel, Switzerland)}, volume = {20}, number = {21}, pages = {}, pmid = {33138072}, issn = {1424-8220}, abstract = {The Internet of Things (IoT) has evolved significantly with advances in gathering data that can be extracted to provide knowledge and facilitate decision-making processes. Currently, IoT data analytics faces challenges such as growing data volumes collected by IoT devices and fast-response requirements of time-sensitive applications, which traditional cloud-based solutions are unable to meet due to bandwidth and latency limitations. In this paper, we develop a distributed analytics framework for fog-enabled IoT systems aiming to avoid raw data movement and reduce latency.
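The social digital-twin entry above (pmid33143038) casts edge placement as a mixed-integer linear program. A toy version of such a placement MILP, written with the PuLP modeling library, is sketched below; the device and node counts, latency values, and capacities are invented, and the objective omits the paper's DT-to-DT (social) latency term for brevity.

```python
import pulp

# Toy DT placement: 3 devices, 2 edge nodes; minimize device-to-DT latency
# subject to per-node capacity. All data are invented for illustration.
latency = {(0, 0): 2, (0, 1): 9, (1, 0): 8, (1, 1): 3, (2, 0): 4, (2, 1): 5}
capacity = {0: 2, 1: 2}  # max DTs hosted per edge node

prob = pulp.LpProblem("dt_placement", pulp.LpMinimize)
x = pulp.LpVariable.dicts("x", list(latency), cat="Binary")  # x[d,n]=1: DT of d on n

prob += pulp.lpSum(latency[k] * x[k] for k in latency)        # objective
for d in range(3):                                            # each DT placed once
    prob += pulp.lpSum(x[(d, n)] for n in range(2)) == 1
for n in range(2):                                            # node capacity
    prob += pulp.lpSum(x[(d, n)] for d in range(3)) <= capacity[n]

prob.solve(pulp.PULP_CBC_CMD(msg=False))
print({k: int(x[k].value()) for k in latency})
```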
The distributed framework leverages the computational capacities of all participants, such as edge devices and fog nodes, and allows them to obtain the globally optimal solution locally. To further enhance the privacy of data holders in the system, a privacy-preserving protocol is proposed using cryptographic schemes. Security analysis was conducted and verified that exact private information about any edge device's raw data would not be inferred by an honest-but-curious neighbor under the proposed secure protocol. In addition, the accuracy of the solution in the secure protocol is unaffected compared to the proposed distributed algorithm without encryption. We further conducted experiments on three case studies: seismic imaging, diabetes progression prediction, and Enron email classification. On the seismic imaging problem, the proposed algorithm can be up to one order of magnitude faster than the benchmarks in reaching the optimal solution. The evaluation results validate the effectiveness of the proposed methodology and demonstrate its potential to be a promising solution for data analytics in fog-enabled IoT systems.}, } @article {pmid33137686, year = {2021}, author = {Li, J and Tooth, S and Zhang, K and Zhao, Y}, title = {Visualisation of flooding along an unvegetated, ephemeral river using Google Earth Engine: Implications for assessment of channel-floodplain dynamics in a time of rapid environmental change.}, journal = {Journal of environmental management}, volume = {278}, number = {Pt 2}, pages = {111559}, doi = {10.1016/j.jenvman.2020.111559}, pmid = {33137686}, issn = {1095-8630}, mesh = {Agriculture ; Colorado ; *Floods ; Humans ; Hydrology ; *Rivers ; }, abstract = {Given rapid environmental change, the development of new, data-driven, interdisciplinary approaches is essential for improving assessment and management of river systems, especially with respect to flooding. In the world's extensive drylands, difficulties in obtaining field observations of major hydrological events mean that remote sensing techniques are commonly used to map river floods and assess flood impacts. Such techniques, however, are dependent on available cloud-free imagery during or immediately after peak discharge, and single images may omit important flood-related hydrogeomorphological events. Here, we combine multiple Landsat images from Google Earth Engine (GEE) with precipitation datasets and high-resolution (<0.65 m) satellite imagery to visualise flooding and assess the associated channel-floodplain dynamics along a 25 km reach of the unvegetated, ephemeral Río Colorado, Bolivia. After cloud and shadow removal, Landsat surface reflectance data were used to calculate the Modified Normalized Difference Water Index (MNDWI) and map flood extents and patterns. From 2004 through 2016, annual flooding area along the narrow (<30 m), shallow (<1.7 m), fine-grained (dominantly silt/clay) channels was positively correlated (R² = 0.83) with 2-day maximum precipitation totals. Rapid meander bend migration, bank erosion, and frequent overbank flooding were associated with the formation of crevasse channels, splays, and headward-eroding channels, and with avulsion (shifting of flow from one channel to another). These processes demonstrate ongoing, widespread channel-floodplain dynamics despite low stream powers and cohesive sediments.
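The Río Colorado entry above (pmid33137686) maps flood extent with the Modified Normalized Difference Water Index, MNDWI = (Green − SWIR) / (Green + SWIR), thresholded to separate water from land. A minimal numpy sketch on assumed reflectance arrays follows; the tiny rasters and the 0.0 threshold are illustrative, while the paper works with Landsat surface reflectance inside Google Earth Engine.

```python
import numpy as np

# MNDWI = (Green - SWIR) / (Green + SWIR); values above a threshold flag water.
# Tiny synthetic reflectance rasters stand in for Landsat green/SWIR bands.
green = np.array([[0.10, 0.08], [0.12, 0.30]])
swir  = np.array([[0.20, 0.25], [0.05, 0.02]])

mndwi = (green - swir) / (green + swir + 1e-9)  # epsilon avoids divide-by-zero
water_mask = mndwi > 0.0                        # illustrative threshold
print(np.round(mndwi, 2))
print(water_mask)                               # bottom row classifies as water
```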
Application of our study approaches to other dryland rivers will help generate comparative data on the controls, rates, patterns and timescales of channel-floodplain dynamics under scenarios of climate change and direct human impacts, with potential implications for improved river management.}, } @article {pmid33126457, year = {2020}, author = {Fang, J and Hu, J and Wei, J and Liu, T and Wang, B}, title = {An Efficient Resource Allocation Strategy for Edge-Computing Based Environmental Monitoring System.}, journal = {Sensors (Basel, Switzerland)}, volume = {20}, number = {21}, pages = {}, pmid = {33126457}, issn = {1424-8220}, support = {61202076//National Natural Science Foundation of China/ ; 4192007//Beijing Municipal Natural Science Foundation/ ; }, abstract = {Cloud computing and microsensor technology have greatly changed environmental monitoring, but it is difficult for cloud-computing-based monitoring systems to meet the computation demands of smaller monitoring granularity and increasing monitoring applications. As a novel computing paradigm, edge computing deals with this problem by deploying resources on the edge network. However, the particularity of environmental monitoring applications is ignored by most previous studies. In this paper, we propose a resource allocation algorithm and a task scheduling strategy to reduce the average completion latency of environmental monitoring applications, taking into account the characteristics of environmental monitoring systems and the dependencies among tasks. Simulations are conducted, and the results show that, compared with traditional algorithms and with emergency tasks considered, the proposed methods decrease the average completion latency by 21.6% in the best scenario.}, } @article {pmid33124526, year = {2021}, author = {Singh, K and Singh, S and Malhotra, J}, title = {Spectral features based convolutional neural network for accurate and prompt identification of schizophrenic patients.}, journal = {Proceedings of the Institution of Mechanical Engineers. Part H, Journal of engineering in medicine}, volume = {235}, number = {2}, pages = {167-184}, doi = {10.1177/0954411920966937}, pmid = {33124526}, issn = {2041-3033}, mesh = {Diagnosis, Computer-Assisted ; *Electroencephalography ; Humans ; Machine Learning ; Neural Networks, Computer ; *Quality of Life ; }, abstract = {Schizophrenia is a serious mental disorder which affects millions of people globally through disturbances in their thinking, feeling and behaviour. In the age of the Internet of Things assisted by cloud computing and machine learning techniques, computer-aided diagnosis of schizophrenia is essential to provide its patients with an opportunity for a better quality of life. In this context, the present paper proposes a spectral features based convolutional neural network (CNN) model for accurate identification of schizophrenic patients using spectral analysis of multichannel EEG signals in real time. This model processes acquired EEG signals with filtering, segmentation and conversion into the frequency domain. The frequency-domain segments are then divided into six distinct spectral bands: delta, theta-1, theta-2, alpha, beta and gamma. The spectral features, including mean spectral amplitude, spectral power and Hjorth descriptors (Activity, Mobility and Complexity), are extracted from each band. These features are independently fed to the proposed spectral features based CNN and long short-term memory network (LSTM) models for classification.
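Among the spectral features listed in the schizophrenia entry above (pmid33124526), the Hjorth descriptors have compact standard definitions: Activity is the signal variance, Mobility is sqrt(var(x′)/var(x)), and Complexity is Mobility(x′)/Mobility(x). A minimal numpy sketch of these generic definitions (not the authors' code) follows.

```python
import numpy as np

def hjorth(x: np.ndarray) -> tuple[float, float, float]:
    """Return Hjorth Activity, Mobility, Complexity for a 1-D signal."""
    dx = np.diff(x)                       # discrete first derivative
    ddx = np.diff(dx)                     # discrete second derivative
    var_x, var_dx, var_ddx = np.var(x), np.var(dx), np.var(ddx)
    activity = var_x
    mobility = np.sqrt(var_dx / var_x)
    complexity = np.sqrt(var_ddx / var_dx) / mobility
    return activity, mobility, complexity

t = np.linspace(0, 1, 256)
eeg_like = np.sin(2 * np.pi * 10 * t) + 0.1 * np.random.default_rng(1).normal(size=t.size)
print(hjorth(eeg_like))
```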
This work also makes use of raw time-domain and frequency-domain EEG segments for classification, using temporal CNN and spectral CNN models of the same architecture, respectively. The overall analysis of the simulation results of all models shows that the proposed spectral features based CNN model is an efficient technique for accurate and prompt identification of schizophrenic patients among healthy individuals, with average classification accuracies of 94.08% and 98.56% on two different datasets and an optimally small classification time.}, } @article {pmid33120553, year = {2020}, author = {Romansky, RP and Noninska, IS}, title = {Challenges of the digital age for privacy and personal data protection.}, journal = {Mathematical biosciences and engineering : MBE}, volume = {17}, number = {5}, pages = {5288-5303}, doi = {10.3934/mbe.2020286}, pmid = {33120553}, issn = {1551-0018}, abstract = {The digital age can be described as a collection of different technological solutions, such as virtual environments, digital services, intelligent applications, machine learning and knowledge-based systems, that determine the specific characteristics of the contemporary world: globalization, e-communications, information sharing, virtualization, etc. However, there is a risk that the technologies of the digital age may violate basic principles of information security and privacy through unregulated access to information and personal data stored in different nodes of the global network. The goal of the article is to determine some special features of information and personal data protection and to summarise the main challenges of the digital age for users' security and privacy. A brief presentation of the fundamental legislation in the fields of privacy and personal data protection is made in the introduction, followed by a review of related work on the topic. Components of information security for counteracting threats and attacks and basic principles in the organization of personal data protection are discussed. The basic challenges of the digital age are summarized by systematizing the privacy risks that contemporary technologies such as social computing, cloud services, the Internet of Things, Big Data and Big Data Analytics pose to users, and separate requirements for securing the privacy of participants, based on General Data Protection Regulation principles, are formulated.}, } @article {pmid33120510, year = {2020}, author = {Zhu, Y and Jiang, ZP and Mo, XH and Zhang, B and Al-Dhelaan, A and Al-Dhelaan, F}, title = {A study on the design methodology of TAC[3] for edge computing.}, journal = {Mathematical biosciences and engineering : MBE}, volume = {17}, number = {5}, pages = {4406-4421}, doi = {10.3934/mbe.2020243}, pmid = {33120510}, issn = {1551-0018}, abstract = {Scenarios such as complex application requirements, zettabyte (ZB)-scale network data, and tens of billions of connected devices pose serious challenges to the capabilities and security of the three pillars of ICT: computing, network, and storage. Edge computing came into being in response. Following the design methodology of "description-synthesis-simulation-optimization", TAC[3] (Tile-Architecture Cluster Computing Core) was proposed as a lightweight accelerated Edge Computing Node (ECN). An ECN with a Tile-Architecture can be designed and simulated through executable description specifications and polymorphous-parallelism design space exploration (DSE).
Through reasonable configuration of the edge-computing environment and constant optimization of typical application scenarios, such as convolutional neural networks and image and graphics processing, we can meet the challenges of network bandwidth, end-cloud delay, and privacy and security brought by the massive data of the IoE. The philosophy that "edge and cloud complement each other, and edge and AI energize each other" will become a behavioral principle of the new IoE generation.}, } @article {pmid33119530, year = {2021}, author = {Wu, Z and Sun, J and Zhang, Y and Zhu, Y and Li, J and Plaza, A and Benediktsson, JA and Wei, Z}, title = {Scheduling-Guided Automatic Processing of Massive Hyperspectral Image Classification on Cloud Computing Architectures.}, journal = {IEEE transactions on cybernetics}, volume = {51}, number = {7}, pages = {3588-3601}, doi = {10.1109/TCYB.2020.3026673}, pmid = {33119530}, issn = {2168-2275}, abstract = {The large data volume and high algorithm complexity of hyperspectral image (HSI) problems have posed big challenges for efficient classification of massive HSI data repositories. Recently, cloud computing architectures have become more relevant to address the big computational challenges introduced in the HSI field. This article proposes an acceleration method for HSI classification that relies on scheduling metaheuristics to automatically and optimally distribute the workload of HSI applications across multiple computing resources on a cloud platform. By analyzing the procedure of a representative classification method, we first develop its distributed and parallel implementation based on the MapReduce mechanism on Apache Spark. The subtasks of the processing flow that can be processed in a distributed way are identified as divisible tasks. The optimal execution of this application on Spark is further formulated as a divisible scheduling framework that takes into account both task execution precedences and task divisibility when allocating the divisible and indivisible subtasks onto computing nodes. The formulated scheduling framework is an optimization procedure that searches for optimized task assignments and partition counts for divisible tasks. Two metaheuristic algorithms are developed to solve this divisible scheduling problem. The scheduling results provide an optimized solution to the automatic processing of HSI big data on clouds, improving the computational efficiency of HSI classification by exploring the parallelism during the parallel processing flow. Experimental results demonstrate that our scheduling-guided approach achieves remarkable speedups by facilitating the automatic processing of HSI classification on Spark, and is scalable to the increasing HSI data volume.}, } @article {pmid33114594, year = {2020}, author = {Krishnamurthi, R and Kumar, A and Gopinathan, D and Nayyar, A and Qureshi, B}, title = {An Overview of IoT Sensor Data Processing, Fusion, and Analysis Techniques.}, journal = {Sensors (Basel, Switzerland)}, volume = {20}, number = {21}, pages = {}, pmid = {33114594}, issn = {1424-8220}, abstract = {In the recent era of the Internet of Things, the dominant role of sensors and the Internet provides a solution to a wide variety of real-life problems. Such applications include smart city, smart healthcare systems, smart building, smart transport and smart environment. However, real-time IoT sensor data pose several challenges, such as a deluge of unclean sensor data and high resource-consumption costs.
As such, this paper addresses how to process IoT sensor data, fuse them with other data sources, and analyse them to produce knowledgeable insights into hidden data patterns for rapid decision-making. The paper covers data processing techniques such as data denoising, data outlier detection, missing data imputation and data aggregation. Further, it elaborates on the necessity of data fusion and various data fusion methods such as direct fusion, associated feature extraction, and identity declaration data fusion. The paper also addresses the integration of data analysis with emerging technologies, such as cloud computing, fog computing and edge computing, to tackle various challenges in IoT sensor networks and sensor data analysis. In summary, this paper is the first of its kind to present a complete overview of IoT sensor data processing, fusion and analysis techniques.}, } @article {pmid33113982, year = {2020}, author = {Biswash, SK and Jayakody, DNK}, title = {A Fog Computing-Based Device-Driven Mobility Management Scheme for 5G Networks.}, journal = {Sensors (Basel, Switzerland)}, volume = {20}, number = {21}, pages = {}, pmid = {33113982}, issn = {1424-8220}, support = {SPARC/2018-2019/P145/SL//SPARC, Ministry of Human Resource Development, India/ ; No.19-37-90037 and No.19-37-90105.//Russian Foundation for Basic Research/ ; NA//Framework of Competitiveness Enhancement Program of the National Research Tomsk Polytechnic University/ ; }, abstract = {The fog computing-based device-driven network is a promising solution for high data rates in modern cellular networks. It is a unique framework to reduce generated data, data-management overheads and network scalability challenges, and it helps to provide a pervasive computation environment for real-time network applications, where mobile data are easily available and accessible to nearby fog servers. It explores a new dimension of the next-generation network called fog networks. Fog networks are a complementary part of the cloud network environment. The proposed network architecture is part of a newly emerged paradigm that extends the network computing infrastructure within the device-driven 5G communication system. This work explores a new design of the fog computing framework to support device-driven communication and achieve better Quality of Service (QoS) and Quality of Experience (QoE). In particular, we focus on the potential of the fog-computing orchestration framework and how it can be customized to the next generation of cellular communication systems. Next, we propose a mobility management procedure for fog networks, considering both static and dynamic mobile nodes. We compare our results with legacy cellular networks and observe that the proposed scheme has lower energy consumption, delay, latency and signaling cost than LTE/LTE-A networks.}, } @article {pmid33113931, year = {2020}, author = {Wu, CY and Huang, KH}, title = {A Framework for Off-Line Operation of Smart and Traditional Devices of IoT Services.}, journal = {Sensors (Basel, Switzerland)}, volume = {20}, number = {21}, pages = {}, pmid = {33113931}, issn = {1424-8220}, support = {MOST 108-3011-F-036-001//Ministry of Science and Technology, Taiwan/ ; B108-I01-016//Tatung University/ ; }, abstract = {Recently, with the continuous evolution of information technology, various technologies such as Building Information, the Internet of Things (IoT), Big Data, Cloud Computing and Machine Learning have been developed and have created a lifestyle change.
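The overview entry above (pmid33114594) enumerates the core sensor-data processing steps: denoising, outlier detection, missing-data imputation, and aggregation. A compact pandas sketch of those four steps on a toy series follows; these are generic recipes (rolling median, z-score, interpolation, resampling), not the survey's specific methods.

```python
import numpy as np
import pandas as pd

# Toy one-minute sensor readings with a spike and a gap.
idx = pd.date_range("2020-01-01", periods=10, freq="min")
raw = pd.Series([20.1, 20.3, 20.2, 35.0, 20.4, np.nan,
                 20.5, 20.6, 20.4, 20.5], index=idx)

denoised = raw.rolling(window=3, center=True, min_periods=1).median()  # denoise
z = (raw - raw.mean()) / raw.std()
outliers = z.abs() > 2.0                                               # outlier flag
imputed = raw.mask(outliers).interpolate(limit_direction="both")       # impute
aggregated = imputed.resample("5min").mean()                           # aggregate
print(aggregated)
```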
A smart IoT system is formed by combining the communication capabilities of the internet with control, monitoring and identification services to integrate people, things and objects. However, in some IoT environments with a weak signal, such as remote areas, warehouses or basements, the network may become unstable, meaning that the IoT system is unable to provide efficient services. This paper therefore presents a framework that ensures the reliability of IoT system services, so that even if the IoT system cannot connect to the network, it can provide its services offline. To avoid increased installation costs or the replacement of existing traditional devices with modern smart devices, this framework can also be used to control traditional devices. The system operation is convenient because users can operate all their smart and traditional devices under the IoT system through voice commands and/or a handheld microcontroller, thus reducing manual operation by the user. The framework proposed in this paper can be applied to various smart scenarios, including smart warehouses, smart restaurants, smart homes, smart farms and smart factories, to improve people's quality of life and convenience, and to create a humane and comfortable smart living environment.}, } @article {pmid33106798, year = {2020}, author = {Lei, H and O'Connell, R and Ehwerhemuepha, L and Taraman, S and Feaster, W and Chang, A}, title = {Agile clinical research: A data science approach to scrumban in clinical medicine.}, journal = {Intelligence-based medicine}, volume = {3}, number = {}, pages = {100009}, pmid = {33106798}, issn = {2666-5212}, abstract = {The COVID-19 pandemic has required greater minute-to-minute urgency of patient treatment in Intensive Care Units (ICUs), rendering the use of Randomized Controlled Trials (RCTs) too slow to be effective for treatment discovery. There is a need for agility in clinical research, and the use of data science to develop predictive models for patient treatment is a potential solution. However, rapidly developing predictive models in healthcare is challenging given the complexity of healthcare problems and the lack of regular interaction between data scientists and physicians. Data scientists can spend significant time working in isolation to build predictive models that may not be useful in clinical environments. We propose the use of an agile data science framework based on the Scrumban framework used in software development. Scrumban is an iterative framework in which, at each iteration, larger problems are broken down into simple, doable tasks for data scientists and physicians. The two sides collaborate closely in formulating clinical questions and developing and deploying predictive models into clinical settings. Physicians can provide feedback or new hypotheses given the performance of the model, and refinement of the model or clinical questions can take place in the next iteration. The rapid development of predictive models can now be achieved with increasing numbers of publicly available healthcare datasets and easily accessible cloud-based data science tools.
What is truly needed are data scientist and physician partnerships ensuring close collaboration between the two sides in using these tools to develop clinically useful predictive models to meet the demands of the COVID-19 healthcare landscape.}, } @article {pmid33104194, year = {2021}, author = {Goonasekera, N and Mahmoud, A and Chilton, J and Afgan, E}, title = {GalaxyCloudRunner: enhancing scalable computing for Galaxy.}, journal = {Bioinformatics (Oxford, England)}, volume = {37}, number = {12}, pages = {1763-1765}, pmid = {33104194}, issn = {1367-4811}, support = {U24 HG006620/HG/NHGRI NIH HHS/United States ; U41 HG006620/HG/NHGRI NIH HHS/United States ; 5U41HG006620-07/NH/NIH HHS/United States ; }, mesh = {Azure Stains ; *Computational Biology ; Documentation ; Humans ; *Software ; }, abstract = {SUMMARY: The existence of more than 100 public Galaxy servers with service quotas is indicative of the need for an increased availability of compute resources for Galaxy to use. The GalaxyCloudRunner enables a Galaxy server to easily expand its available compute capacity by sending user jobs to cloud resources. User jobs are routed to the acquired resources based on a set of configurable rules and the resources can be dynamically acquired from any of four popular cloud providers (AWS, Azure, GCP or OpenStack) in an automated fashion.
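The GalaxyCloudRunner SUMMARY above describes routing user jobs to acquired cloud resources via configurable rules. The sketch below illustrates the general rule-based routing idea in plain Python; it is not GalaxyCloudRunner's actual configuration format or API (the project documents its own job-conf based setup at http://gcr.cloudve.org/), and all rule names and job fields are invented.

```python
# Generic rule-based job routing, illustrating the concept only; not
# GalaxyCloudRunner's real configuration schema or API.
RULES = [
    {"if": lambda job: job["tool"] == "bwa_mem" and job["size_gb"] > 10,
     "then": "cloud-highmem-pool"},
    {"if": lambda job: job["size_gb"] > 1, "then": "cloud-standard-pool"},
]
DEFAULT_DESTINATION = "local-cluster"

def route(job: dict) -> str:
    """Return the first matching destination, else the default."""
    for rule in RULES:
        if rule["if"](job):
            return rule["then"]
    return DEFAULT_DESTINATION

print(route({"tool": "bwa_mem", "size_gb": 42}))  # -> cloud-highmem-pool
print(route({"tool": "fastqc", "size_gb": 0.2}))  # -> local-cluster
```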

GalaxyCloudRunner is implemented in Python and leverages Docker containers. The source code is MIT licensed and available at https://github.com/cloudve/galaxycloudrunner. The documentation is available at http://gcr.cloudve.org/.}, } @article {pmid33100917, year = {2020}, author = {Amaro, RE and Mulholland, AJ}, title = {Biomolecular Simulations in the Time of COVID19, and After.}, journal = {Computing in science & engineering}, volume = {22}, number = {6}, pages = {30-36}, pmid = {33100917}, issn = {1521-9615}, support = {/WT_/Wellcome Trust/United Kingdom ; BB/L01386X/1/BB_/Biotechnology and Biological Sciences Research Council/United Kingdom ; P41 GM103426/GM/NIGMS NIH HHS/United States ; R01 GM132826/GM/NIGMS NIH HHS/United States ; }, abstract = {COVID19 has changed life for people worldwide. Despite lockdowns globally, computational research has pressed on, working remotely and collaborating virtually on research questions in COVID19 and the virus it is caused by, SARS-CoV-2. Molecular simulations can help to characterize the function of viral and host proteins and have the potential to contribute to the search for vaccines and treatments. Changes in the modus operandi of research groups include broader adoption of the use of preprint servers, earlier and more open sharing of methods, models, and data, the use of social media to rapidly disseminate information, online seminars, and cloud-based virtual collaboration. Research funders and computing providers worldwide recognized the need to provide rapid and significant access to computational architectures. In this review, we discuss how the interplay of all of these factors is influencing the impact - both potential and realized - of biomolecular simulations in the fight against SARS-CoV-2.}, } @article {pmid33100581, year = {2020}, author = {Alam, M and Samad, MD and Vidyaratne, L and Glandon, A and Iftekharuddin, KM}, title = {Survey on Deep Neural Networks in Speech and Vision Systems.}, journal = {Neurocomputing}, volume = {417}, number = {}, pages = {302-321}, pmid = {33100581}, issn = {0925-2312}, support = {R01 EB020683/EB/NIBIB NIH HHS/United States ; }, abstract = {This survey presents a review of state-of-the-art deep neural network architectures, algorithms, and systems in vision and speech applications. Recent advances in deep artificial neural network algorithms and architectures have spurred rapid innovation and development of intelligent vision and speech systems. With availability of vast amounts of sensor data and cloud computing for processing and training of deep neural networks, and with increased sophistication in mobile and embedded technology, the next-generation intelligent systems are poised to revolutionize personal and commercial computing. This survey begins by providing background and evolution of some of the most successful deep learning models for intelligent vision and speech systems to date. An overview of large-scale industrial research and development efforts is provided to emphasize future trends and prospects of intelligent vision and speech systems. Robust and efficient intelligent systems demand low-latency and high fidelity in resource-constrained hardware platforms such as mobile devices, robots, and automobiles. Therefore, this survey also provides a summary of key challenges and recent successes in running deep neural networks on hardware-restricted platforms, i.e. within limited memory, battery life, and processing capabilities. 
Finally, emerging applications of vision and speech across disciplines such as affective computing, intelligent transportation, and precision medicine are discussed. To our knowledge, this paper provides one of the most comprehensive surveys on the latest developments in intelligent vision and speech applications from the perspectives of both software and hardware systems. Many of these emerging technologies using deep neural networks show tremendous promise to revolutionize research and development for future vision and speech systems.}, } @article {pmid33088611, year = {2020}, author = {Kovatch, P and Gai, L and Cho, HM and Fluder, E and Jiang, D}, title = {Optimizing High-Performance Computing Systems for Biomedical Workloads.}, journal = {IEEE International Symposium on Parallel & Distributed Processing, Workshops and Phd Forum : [proceedings]. IEEE International Symposium on Parallel & Distributed Processing, Workshops and Phd Forum}, volume = {2020}, number = {}, pages = {183-192}, pmid = {33088611}, issn = {2164-7062}, support = {S10 OD018522/OD/NIH HHS/United States ; S10 OD026880/OD/NIH HHS/United States ; }, abstract = {The productivity of computational biologists is limited by the speed of their workflows and subsequent overall job throughput. Because most biomedical researchers are focused on better understanding scientific phenomena rather than developing and optimizing code, a computing and data system implemented in an adventitious and/or non-optimized manner can impede the progress of scientific discovery. In our experience, most computational, life-science applications do not generally leverage the full capabilities of high-performance computing, so tuning a system for these applications is especially critical. To optimize a system effectively, systems staff must understand the effects of the applications on the system. Effective stewardship of the system includes an analysis of the impact of the applications on the compute cores, file system, resource manager and queuing policies. The resulting improved system design, and enactment of a sustainability plan, help to enable a long-term resource for productive computational and data science. We present a case study of a typical biomedical computational workload at a leading academic medical center supporting over $100 million per year in computational biology research. Over the past eight years, our high-performance computing system has enabled over 900 biomedical publications in four major areas: genetics and population analysis, gene expression, machine learning, and structural and chemical biology. We have upgraded the system several times in response to trends, actual usage, and user feedback. Major components crucial to this evolution include scheduling structure and policies, memory size, compute type and speed, parallel file system capabilities, and deployment of cloud technologies. We evolved a 70 teraflop machine to a 1.4 petaflop machine in seven years and grew our user base nearly 10-fold. For long-term stability and sustainability, we established a chargeback fee structure. Our overarching guiding principle for each progression has been to increase scientific throughput and enable enhanced scientific fidelity with minimal impact to existing user workflows or code. This highly-constrained system optimization has presented unique challenges, leading us to adopt new approaches to provide constructive pathways forward. 
We share our practical strategies resulting from our ongoing growth and assessments.}, } @article {pmid33079655, year = {2021}, author = {Guo, J and Tian, S and Liu, K and Guo, J}, title = {IoT-Enabled Fluorescence Sensor for Quantitative KET Detection and Anti-Drug Situational Awareness.}, journal = {IEEE transactions on nanobioscience}, volume = {20}, number = {1}, pages = {2-8}, doi = {10.1109/TNB.2020.3032121}, pmid = {33079655}, issn = {1558-2639}, mesh = {Adolescent ; *Awareness ; Humans ; Immunoassay ; Reproducibility of Results ; }, abstract = {Recently, drug abuse has become a worldwide concern. Among the many varieties of drugs, KET has been found to be a favorite among drug addicts, especially teenagers, for recreational purposes. KET is an analgesic and anesthetic drug which can induce hallucinogenic and dissociative effects after high-dose abuse. Hence, it is critical to develop a rapid and sensitive detection method for strict drug control. In this study, we proposed a cloud-enabled smartphone-based fluorescence sensor for quantitative detection of KET from human hair samples. The lateral flow immunoassay (LFIA) was used as the detection strategy, with UCNPs introduced as fluorescent labels. The sensor was capable of identifying the up-converted fluorescence and calculating the signal intensities on the TL and CL to obtain a T/C value, which corresponded to the KET concentration. The sensor transmitted the test data to the cloud-enabled smartphone through a Type-C interface, and the data were further uploaded to the edge of the network for cloud-edge computing and storage. The entire detection took only 5 minutes, with high stability and reliability. The detection limit of KET was 1 ng/mL, with a quantitative detection range from 1 to 150 ng/mL. Furthermore, building on the rapid development of the Internet of Things (IoT), an App was developed on the smartphone for anti-drug situational awareness. Based on this system, it is convenient for police departments to perform on-site KET detection. Moreover, the system is useful for predicting the development trend of future events, contributing much to the construction of a harmonious society.}, } @article {pmid33066295, year = {2020}, author = {Janbi, N and Katib, I and Albeshri, A and Mehmood, R}, title = {Distributed Artificial Intelligence-as-a-Service (DAIaaS) for Smarter IoE and 6G Environments.}, journal = {Sensors (Basel, Switzerland)}, volume = {20}, number = {20}, pages = {}, pmid = {33066295}, issn = {1424-8220}, support = {RG-10-611-38//King Abdulaziz University/ ; }, abstract = {Artificial intelligence (AI) has taken us by storm, helping us to make decisions in everything we do, even in finding our "true love" and the "significant other". While 5G promises us high-speed mobile internet, 6G pledges to support ubiquitous AI services through next-generation softwarization, heterogeneity, and configurability of networks. The work on 6G is in its infancy and requires the community to conceptualize and develop its design, implementation, deployment, and use cases. Towards this end, this paper proposes a framework for Distributed AI as a Service (DAIaaS) provisioning for Internet of Everything (IoE) and 6G environments. The AI service is "distributed" because the actual training and inference computations are divided into smaller, concurrent, computations suited to the level and capacity of resources available with cloud, fog, and edge layers.
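To make the cloud/fog/edge division of labour just described concrete, the following minimal Python sketch places an inference task on the lowest layer whose capacity and latency fit; the capacity figures and the place_inference() policy are invented for illustration and are not part of the DAIaaS framework itself.

# Hypothetical layer-placement policy: try edge first, then fog, then cloud.
LAYERS = [
    {"name": "edge",  "max_model_mb": 50,     "rtt_ms": 5},
    {"name": "fog",   "max_model_mb": 500,    "rtt_ms": 20},
    {"name": "cloud", "max_model_mb": 10_000, "rtt_ms": 120},
]

def place_inference(model_mb: float, latency_budget_ms: float) -> str:
    """Pick the lowest layer that can hold the model within the latency budget."""
    for layer in LAYERS:
        if model_mb <= layer["max_model_mb"] and layer["rtt_ms"] <= latency_budget_ms:
            return layer["name"]
    return "reject"  # no layer satisfies both constraints

print(place_inference(model_mb=30, latency_budget_ms=10))      # -> edge
print(place_inference(model_mb=2_000, latency_budget_ms=200))  # -> cloud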
Multiple DAIaaS provisioning configurations for distributed training and inference are proposed to investigate the design choices and performance bottlenecks of DAIaaS. Specifically, we have developed three case studies (e.g., smart airport) with eight scenarios (e.g., federated learning) comprising nine applications and AI delivery models (smart surveillance, etc.) and 50 distinct sensor and software modules (e.g., object tracker). The evaluation of the case studies and the DAIaaS framework is reported in terms of end-to-end delay, network usage, energy consumption, and financial savings with recommendations to achieve higher performance. DAIaaS will facilitate standardization of distributed AI provisioning, allow developers to focus on the domain-specific details without worrying about distributed training and inference, and help systemize the mass-production of technologies for smarter environments.}, } @article {pmid33064102, year = {2020}, author = {Kirchberg, J and Fritzmann, J and Weitz, J and Bork, U}, title = {eHealth Literacy of German Physicians in the Pre-COVID-19 Era: Questionnaire Study.}, journal = {JMIR mHealth and uHealth}, volume = {8}, number = {10}, pages = {e20099}, pmid = {33064102}, issn = {2291-5222}, mesh = {Adult ; *Attitude of Health Personnel ; COVID-19 ; Cohort Studies ; Coronavirus Infections/epidemiology ; Female ; Germany/epidemiology ; *Health Literacy ; Humans ; Male ; Middle Aged ; Pandemics ; Physicians/*psychology/statistics & numerical data ; Pneumonia, Viral/epidemiology ; Surveys and Questionnaires ; *Telemedicine ; }, abstract = {BACKGROUND: Digitalization is a disruptive technology that changes the way we deliver diagnostic procedures and treatments in medicine. Different stakeholders have varying interests in and expectations of the digitalization of modern medicine. Many recent digital advances in the medical field, such as the implementation of electronic health records, telemedical services, and mobile health apps, are increasingly used by medical professionals and patients. During the current pandemic outbreak of a novel coronavirus-caused respiratory disease (COVID-19), many modern information and communication technologies (ICT) have been used to overcome the physical barriers and limitations caused by government-issued curfews and workforce shortages. Therefore, the COVID-19 pandemic has led to a surge in the usage of modern ICT in medicine. At the same time, the eHealth literacy of physicians working with these technologies has probably not improved since our study.

OBJECTIVE: This paper describes a representative cohort of German physicians before the COVID-19 pandemic and their eHealth literacy and attitude towards modern ICT.

METHODS: A structured, self-developed questionnaire about user behavior and attitudes towards eHealth applications was administered to a representative cohort of 93 German physicians.

RESULTS: Of the 93 German physicians who participated in the study, 97% (90/93) use a mobile phone. Medical apps are used by 42% (39/93). Half of the surveyed physicians (47/93, 50%) use their private mobile phones for official purposes on a daily basis. Telemedicine is part of the daily routine for more than one-third (31/93, 33%) of all participants. More than 80% (76/93, 82%) of the trial participants state that their knowledge regarding the legal aspects and data safety of medical apps and cloud computing is insufficient.

CONCLUSIONS: Modern ICT is frequently used and mostly welcomed by German physicians. However, there is a tremendous lack of eHealth literacy and knowledge about the safe and secure implementation of these technologies in routine clinical practice.}, } @article {pmid33064097, year = {2020}, author = {Kim, JM and Lee, WR and Kim, JH and Seo, JM and Im, C}, title = {Light-Induced Fluorescence-Based Device and Hybrid Mobile App for Oral Hygiene Management at Home: Development and Usability Study.}, journal = {JMIR mHealth and uHealth}, volume = {8}, number = {10}, pages = {e17881}, pmid = {33064097}, issn = {2291-5222}, mesh = {Algorithms ; Fluorescence ; Humans ; *Mobile Applications ; Oral Hygiene ; }, abstract = {BACKGROUND: Dental diseases can be prevented through the management of dental plaques. Dental plaque can be identified using the light-induced fluorescence (LIF) technique, in which the teeth are illuminated with light at 405 nm. The LIF technique is more convenient than the commercial technique using a disclosing agent, but the result may vary across individuals as it still requires visual identification.

OBJECTIVE: The objective of this study is to introduce and validate a deep learning-based oral hygiene monitoring system that makes it easy to identify dental plaques at home.

METHODS: We developed a LIF-based system consisting of a device that can visually identify dental plaques and a mobile app that displays the location and area of dental plaques on oral images. The mobile app is programmed to automatically determine the location and distribution of dental plaques using a deep learning-based algorithm and present the results to the user as time series data. The mobile app is also built as a convergence of native and web applications so that the algorithm is executed on a cloud server to efficiently distribute computing resources.

RESULTS: The location and distribution of users' dental plaques could be identified via the hand-held LIF device or mobile app. The color correction filter in the device was developed using a color mixing technique. The mobile app was built as a hybrid app combining the functionalities of a native application and a web application. Through the scrollable WebView on the mobile app, changes in the time series of dental plaque could be confirmed. The algorithm for dental plaque detection was implemented to run on Amazon Web Services for object detection by single shot multibox detector and instance segmentation by Mask region-based convolutional neural network.
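As a toy illustration of how per-visit segmentation masks (such as Mask R-CNN outputs) become the plaque time series the app displays, consider the sketch below; the synthetic masks and the plaque_fraction() helper are hypothetical stand-ins, not the authors' code.

# Turn one binary plaque mask per visit into a time series of plaque coverage.
import numpy as np

def plaque_fraction(mask: np.ndarray) -> float:
    """Fraction of image pixels flagged as plaque in a binary mask."""
    return float(mask.mean())

# Synthetic 64x64 masks standing in for real model output, one per visit.
rng = np.random.default_rng(0)
visits = [rng.random((64, 64)) < p for p in (0.30, 0.22, 0.15)]
series = [round(plaque_fraction(m), 3) for m in visits]
print(series)  # fractions near 0.30, 0.22, 0.15: a declining plaque trend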

CONCLUSIONS: This paper shows that the system can be used as a home oral care product for timely identification and management of dental plaques. In the future, it is expected that these products will significantly reduce the social costs associated with dental diseases.}, } @article {pmid33050165, year = {2020}, author = {Butun, I and Sari, A and Österberg, P}, title = {Hardware Security of Fog End-Devices for the Internet of Things.}, journal = {Sensors (Basel, Switzerland)}, volume = {20}, number = {20}, pages = {}, pmid = {33050165}, issn = {1424-8220}, support = {IB2019- 8185//Swedish Foundation for International Cooperation in Research and Higher Education/ ; 773717//Horizon 2020 Framework Programme/ ; }, abstract = {The proliferation of the Internet of Things (IoT) caused new application needs to emerge as rapid response ability is missing in the current IoT end-devices. Therefore, Fog Computing has been proposed to be an edge component for the IoT networks as a remedy to this problem. In recent times, cyber-attacks are on the rise, especially towards infrastructure-less networks, such as IoT. Many botnet attack variants (Mirai, Torii, etc.) have shown that the tiny microdevices at the lower spectrum of the network are becoming a valued participant of a botnet, for further executing more sophisticated attacks against infrastructural networks. As such, the fog devices also need to be secured against cyber-attacks, not only software-wise, but also from hardware alterations and manipulations. Hence, this article first highlights the importance and benefits of fog computing for IoT networks, then investigates the means of providing hardware security to these devices with an enriched literature review, including but not limited to Hardware Security Module, Physically Unclonable Function, System on a Chip, and Tamper Resistant Memory.}, } @article {pmid33048709, year = {2021}, author = {Oppermann, M and Kincaid, R and Munzner, T}, title = {VizCommender: Computing Text-Based Similarity in Visualization Repositories for Content-Based Recommendations.}, journal = {IEEE transactions on visualization and computer graphics}, volume = {27}, number = {2}, pages = {495-505}, doi = {10.1109/TVCG.2020.3030387}, pmid = {33048709}, issn = {1941-0506}, abstract = {Cloud-based visualization services have made visual analytics accessible to a much wider audience than ever before. Systems such as Tableau have started to amass increasingly large repositories of analytical knowledge in the form of interactive visualization workbooks. When shared, these collections can form a visual analytic knowledge base. However, as the size of a collection increases, so does the difficulty in finding relevant information. Content-based recommendation (CBR) systems could help analysts in finding and managing workbooks relevant to their interests. Toward this goal, we focus on text-based content that is representative of the subject matter of visualizations rather than the visual encodings and style. We discuss the challenges associated with creating a CBR based on visualization specifications and explore more concretely how to implement the relevance measures required using Tableau workbook specifications as the source of content data. We also demonstrate what information can be extracted from these visualization specifications and how various natural language processing techniques can be used to compute similarity between workbooks as one way to measure relevance. 
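One plausible reading of such a text-based relevance measure, sketched with off-the-shelf tools: fit an LDA topic model to workbook text and compare per-document topic distributions. The toy corpus, the topic count, and the use of Jensen-Shannon distance are assumptions made for illustration, not the paper's exact pipeline.

# Topic-distribution similarity between documents via LDA.
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.decomposition import LatentDirichletAllocation
from scipy.spatial.distance import jensenshannon

docs = [
    "sales revenue by region quarterly forecast",
    "regional sales revenue dashboard forecast",
    "patient admissions hospital bed occupancy",
]
X = CountVectorizer().fit_transform(docs)
lda = LatentDirichletAllocation(n_components=2, random_state=0)
theta = lda.fit_transform(X)  # rows are per-document topic distributions

def similarity(i: int, j: int) -> float:
    """1 minus Jensen-Shannon distance between topic distributions."""
    return 1.0 - jensenshannon(theta[i], theta[j])

print(similarity(0, 1))  # expected: higher for the two sales workbooks...
print(similarity(0, 2))  # ...than for a sales vs. hospital pair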
We report on a crowd-sourced user study to determine if our similarity measure mimics human judgement. Finally, we choose latent Dirichlet allocation (LDA) as a specific model and instantiate it in a proof-of-concept recommender tool to demonstrate the basic function of our similarity measure.}, } @article {pmid33044796, year = {2020}, author = {Wang, L and Yan, B and Boasson, V}, title = {A national fight against COVID-19: lessons and experiences from China.}, journal = {Australian and New Zealand journal of public health}, volume = {44}, number = {6}, pages = {502-507}, pmid = {33044796}, issn = {1753-6405}, mesh = {COVID-19 ; China/epidemiology ; Coronavirus Infections/epidemiology/*prevention & control ; Disease Outbreaks/*prevention & control ; Emergency Service, Hospital/*organization & administration ; Government ; Humans ; Leadership ; Moral Obligations ; Pandemics/*prevention & control ; Pneumonia, Viral/epidemiology/*prevention & control ; Public Health/*methods ; Qualitative Research ; }, abstract = {OBJECTIVE: This paper aims to review the public health measures and actions taken during the fight against COVID-19 in China, to generate a model for the prevention and control of public health emergencies by summarising the lessons and experiences gained.

METHODS: This paper adopts a widely accepted qualitative research and coding method to analyze textual materials.

RESULTS: Although the Chinese CDC did not work effectively on risk identification and warning in the early stages, China was able to respond quickly and successfully to this medical emergency, after the initial shock of recognizing a novel epidemic, through the swift implementation of national-scale health emergency management.

CONCLUSIONS: The success in fighting against COVID-19 in China can be attributed to: 1) governance adaptable to changing situations; 2) a culture of moral compliance with rules; 3) trusted collaboration between government and people; 4) an advanced technical framework, ABCD+5G (A-Artificial intelligence; B-Blockchain; C-Cloud computing; D-Big data). Implications for public health: This paper constructs a conceptual model for pandemic management based on the lessons and experiences of fighting COVID-19 in China. It provides insights for pandemic control and public emergency management in similar contexts.}, } @article {pmid33030032, year = {2022}, author = {Yu, Z and Jung, D and Park, S and Hu, Y and Huang, K and Rasco, BA and Wang, S and Ronholm, J and Lu, X and Chen, J}, title = {Smart traceability for food safety.}, journal = {Critical reviews in food science and nutrition}, volume = {62}, number = {4}, pages = {905-916}, doi = {10.1080/10408398.2020.1830262}, pmid = {33030032}, issn = {1549-7852}, mesh = {Food ; *Food Safety ; *Food Supply ; Humans ; }, abstract = {Current food production faces a tremendous challenge due to the growing human population. The global population is estimated to reach 9 billion by 2050 with 70% more food being required. Safe food is an important dimension of food security, and food traceability across the supply chain is a key component of this. However, current food traceability systems are challenged by frequent occurrences of food safety incidents and food recalls that have damaged consumer confidence, caused huge economic loss, and put pressure on food safety agencies. This review focuses on smart food traceability that has the potential to significantly improve food safety in global food supply chains. The basic concepts and critical perspectives for various detection strategies for food safety are summarized, including portable detection devices, smart indicators and sensors integrated on food packages, and data-assisted whole-genome sequencing. In addition, new digital technologies, such as Internet-of-things (IoTs) and cloud computing, are discussed with the aim of providing readers with an overview of the exciting opportunities in smart food traceability systems.}, } @article {pmid33019245, year = {2020}, author = {Ilokah, M and Eklund, JM}, title = {A Secure Privacy Preserving Cloud-based Framework for Sharing Electronic Health Data.}, journal = {Annual International Conference of the IEEE Engineering in Medicine and Biology Society. IEEE Engineering in Medicine and Biology Society. Annual International Conference}, volume = {2020}, number = {}, pages = {5592-5597}, doi = {10.1109/EMBC44109.2020.9175792}, pmid = {33019245}, issn = {2694-0604}, mesh = {*Cloud Computing ; Computer Security ; Electronic Health Records ; Information Storage and Retrieval ; *Privacy ; }, abstract = {There exists a need for sharing user health data, especially with institutes for research purposes, in a secure fashion. This is especially true in the case of a system that includes a third party storage service, such as cloud computing, which limits the control of the data owner. The use of encryption for secure data storage continues to evolve to meet the need for flexible and fine-grained access control. This evolution has led to the development of Attribute Based Encryption (ABE). The use of ABE to ensure the security and privacy of health data has been explored.
This paper presents an ABE-based framework which allows for the secure outsourcing of the more computationally intensive processes for data decryption to the cloud servers. This reduces the time needed for decryption to occur at the user end and reduces the amount of computational power needed by users to access data.}, } @article {pmid33018935, year = {2020}, author = {Cheon, A and Jung, SY and Prather, C and Sarmiento, M and Wong, K and Woodbridge, DM}, title = {A Machine Learning Approach to Detecting Low Medication State with Wearable Technologies.}, journal = {Annual International Conference of the IEEE Engineering in Medicine and Biology Society. IEEE Engineering in Medicine and Biology Society. Annual International Conference}, volume = {2020}, number = {}, pages = {4252-4255}, doi = {10.1109/EMBC44109.2020.9176310}, pmid = {33018935}, issn = {2694-0604}, mesh = {Humans ; *Machine Learning ; Medication Adherence ; *Wearable Electronic Devices ; }, abstract = {Medication adherence is a critical component and implicit assumption of the patient life cycle that is often violated, incurring financial and medical costs to both patients and the medical system at large. As obstacles to medication adherence are complex and varied, approaches to overcome them must themselves be multifaceted. This paper demonstrates one such approach using sensor data recorded by an Apple Watch to detect low counts of pill medication in standard prescription bottles. We use distributed computing on a cloud-based platform to efficiently process large volumes of high-frequency data and train a Gradient Boosted Tree machine learning model. Our final model yielded average cross-validated accuracy and F1 scores of 80.27% and 80.22%, respectively. We conclude this paper with two use cases in which wearable devices such as the Apple Watch can contribute to efforts to improve patient medication adherence.}, } @article {pmid33018783, year = {2020}, author = {LeMoyne, R and Mastroianni, T and Whiting, D and Tomycz, N}, title = {Parametric evaluation of deep brain stimulation parameter configurations for Parkinson's disease using a conformal wearable and wireless inertial sensor system and machine learning.}, journal = {Annual International Conference of the IEEE Engineering in Medicine and Biology Society. IEEE Engineering in Medicine and Biology Society. Annual International Conference}, volume = {2020}, number = {}, pages = {3606-3611}, doi = {10.1109/EMBC44109.2020.9175408}, pmid = {33018783}, issn = {2694-0604}, mesh = {*Deep Brain Stimulation ; Humans ; Machine Learning ; *Parkinson Disease/therapy ; Tremor/therapy ; *Wearable Electronic Devices ; }, abstract = {Deep brain stimulation enables highly specified patient-unique therapeutic intervention ameliorating the symptoms of Parkinson's disease. Inherent to the efficacy of deep brain stimulation is the acquisition of an optimal parameter configuration. Using conventional methods, the optimization process for tuning the deep brain stimulation system parameters can intrinsically induce strain on clinical resources. An advanced means of quantifying Parkinson's hand tremor and distinguishing between parameter settings would be highly beneficial. The conformal wearable and wireless inertial sensor system, such as the BioStamp nPoint, has a volumetric profile on the order of a bandage that readily enables convenient quantification of Parkinson's disease hand tremor.
Furthermore, the BioStamp nPoint has been certified by the FDA as a 510(k) medical device for acquisition of medical grade data. Parametric variation of the amplitude parameter for deep brain stimulation can be quantified through the BioStamp nPoint conformal wearable and wireless inertial sensor system mounted to the dorsum of the hand. The acquired inertial sensor signal data can be wirelessly transmitted to a secure Cloud computing environment for post-processing. The quantified inertial sensor data for the parametric study of the effects of varying amplitude can be distinguished through machine learning classification. Software automation through Python can consolidate the inertial sensor data into a suitable feature set format. Using the multilayer perceptron neural network, considerable machine learning classification accuracy is attained to distinguish multiple parametric settings of amplitude for deep brain stimulation, such as 4.0 mA, 2.5 mA, 1.0 mA, and 'Off' status representing a baseline. These findings constitute an advance toward the pathway of attaining real-time closed loop automated parameter configuration tuning for treatment of Parkinson's disease using deep brain stimulation.}, } @article {pmid33007867, year = {2020}, author = {Wu, HL and Chang, CC and Zheng, YZ and Chen, LS and Chen, CC}, title = {A Secure IoT-Based Authentication System in Cloud Computing Environment.}, journal = {Sensors (Basel, Switzerland)}, volume = {20}, number = {19}, pages = {}, pmid = {33007867}, issn = {1424-8220}, abstract = {The Internet of Things (IoT) is currently the most popular field in communication and information techniques. However, designing a secure and reliable authentication scheme for IoT-based architectures is still a challenge. In 2019, Zhou et al. showed that schemes proposed by Amin et al. and Maitra et al. are vulnerable to off-line guessing attacks, user tracking attacks, etc. On this basis, a lightweight authentication scheme based on IoT was proposed, which can resist various types of attacks and realize key security features such as user audit, mutual authentication, and session security. However, we found weaknesses in the scheme upon evaluation. Hence, we proposed an enhanced scheme based on their mechanism, thus achieving the security requirements and resisting well-known attacks.}, } @article {pmid33001812, year = {2022}, author = {Li, P and Zhao, YB and Kang, Y}, title = {Integrated Channel-Aware Scheduling and Packet-Based Predictive Control for Wireless Cloud Control Systems.}, journal = {IEEE transactions on cybernetics}, volume = {52}, number = {5}, pages = {2735-2749}, doi = {10.1109/TCYB.2020.3019179}, pmid = {33001812}, issn = {2168-2275}, abstract = {The scheduling and control of wireless cloud control systems involving multiple independent control systems and a centralized cloud computing platform are investigated. For such systems, the scheduling of the data transmission as well as some particular design of the controller can be equally important. From this observation, we propose a dual channel-aware scheduling strategy under the packet-based model predictive control framework, which integrates a decentralized channel-aware access strategy for each sensor, a centralized access strategy for the controllers, and a packet-based predictive controller to stabilize each control system.
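The packet-based predictive control idea just described can be sketched in a few lines of Python: each period the controller ships a horizon of predicted inputs, and the plant falls back on stored predictions when a packet is dropped. The scalar plant, the deadbeat-style predictions, and the 30% loss rate below are toy assumptions for illustration, not the paper's design.

# Packet-based predictive control over a lossy link (toy sketch).
import numpy as np

rng = np.random.default_rng(2)
a, b = 1.1, 1.0           # unstable scalar plant: x_next = a*x + b*u
horizon = 4               # number of predicted inputs per packet
x, buffer, age = 5.0, [0.0] * horizon, 0

for k in range(20):
    packet_arrives = rng.random() > 0.3   # 30% packet loss
    if packet_arrives:
        # Controller side: predict a horizon of inputs driving the state to zero.
        xp, buffer = x, []
        for _ in range(horizon):
            u = -a / b * xp
            buffer.append(u)
            xp = a * xp + b * u
        age = 0
    u = buffer[min(age, horizon - 1)]     # on dropout, reuse stored predictions
    x = a * x + b * u
    age += 1

print(round(x, 6))  # driven to zero once a packet gets through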
First, the decentralized scheduling strategy for each sensor is set in a noncooperative game framework and is then designed with asymptotic convergence. Then, the central scheduler for the controllers takes advantage of a prioritized threshold strategy, which outperforms a random one that neglects the channel gain information. Finally, we prove stability for each system by constructing a new Lyapunov function, and further reveal the dependence of the control system stability on the prediction horizon and the successful access probabilities of each sensor and controller. These theoretical results are successfully verified by numerical simulation.}, } @article {pmid32989184, year = {2020}, author = {Eisa, M and Sandhu, A and Prakash, R and Ganocy, SJ and Fass, R}, title = {The Risk of Acute Myocardial Infarction in Patients With Gastroesophageal Reflux Disease.}, journal = {Journal of neurogastroenterology and motility}, volume = {26}, number = {4}, pages = {471-476}, pmid = {32989184}, issn = {2093-0879}, abstract = {BACKGROUND/AIMS: A number of inflammatory mediators have been documented to be elevated in gastroesophageal reflux disease (GERD). Similar inflammatory mediators are involved in coronary artery disease. Thus, the aim of the study is to determine if GERD is a risk factor for developing acute myocardial infarction (AMI).

METHODS: We used Explorys, a private cloud-based data store to which a number of health care systems feed information. We identified a cohort of GERD patients who have undergone an esophagogastroduodenoscopy compared to those without GERD. Incidence of AMI was studied after statistically controlling for known AMI risk factors.

RESULTS: A total of 200 400 patients were included in the GERD group and 386 800 patients in the non-GERD group. The primary event of AMI occurred in 17 200 patients in the GERD group (8.6%) vs 24 300 in the non-GERD group (6.3%). After logistic regression analysis controlling for 6 major risk factors, namely male gender (OR, 1.09; 95% CI, 1.07-1.11; P < 0.001), hypertension (OR, 6.53; 95% CI, 6.21-6.88; P < 0.001), hyperlipidemia (OR, 3.08; 95% CI, 2.96-3.20; P < 0.001), diabetes mellitus (OR, 1.72; 95% CI, 1.69-1.76; P < 0.001), obesity (OR, 1.02; 95% CI, 1.00-1.04; P = 0.044), and smoking (OR, 1.38; 95% CI, 1.35-1.41; P < 0.001), the odds of developing AMI in the GERD population were 1.11 (95% CI, 1.08-1.13; P < 0.001). GERD had higher odds of developing AMI than male gender or obesity in our study.
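For readers checking the arithmetic, the crude (unadjusted) odds ratio implied by the counts reported above can be computed directly; note that the paper's 1.11 is the adjusted estimate, and in a logistic regression each adjusted OR is exp(beta) for its covariate.

# Worked arithmetic from the reported counts (not new data).
import math

gerd_events, gerd_total = 17_200, 200_400
ctrl_events, ctrl_total = 24_300, 386_800

odds_gerd = gerd_events / (gerd_total - gerd_events)
odds_ctrl = ctrl_events / (ctrl_total - ctrl_events)
print(round(odds_gerd / odds_ctrl, 2))  # ~1.4 crude, vs 1.11 after adjustment

# Adjusted OR and logistic-regression coefficient are related by OR = exp(beta):
beta_gerd = math.log(1.11)
print(round(math.exp(beta_gerd), 2))    # 1.11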

CONCLUSIONS: This study demonstrated that GERD is a risk factor for AMI, higher than male gender and obesity. However, the increased risk may be clinically insignificant.}, } @article {pmid32977409, year = {2020}, author = {Grigorescu, S and Cocias, T and Trasnea, B and Margheri, A and Lombardi, F and Aniello, L}, title = {Cloud2Edge Elastic AI Framework for Prototyping and Deployment of AI Inference Engines in Autonomous Vehicles.}, journal = {Sensors (Basel, Switzerland)}, volume = {20}, number = {19}, pages = {}, pmid = {32977409}, issn = {1424-8220}, abstract = {Self-driving cars and autonomous vehicles are revolutionizing the automotive sector, shaping the future of mobility altogether. Although the integration of novel technologies such as Artificial Intelligence (AI) and Cloud/Edge computing provides golden opportunities to improve autonomous driving applications, there is a need to modernize the whole prototyping and deployment cycle of AI components accordingly. This paper proposes a novel framework for developing so-called AI Inference Engines for autonomous driving applications based on deep learning modules, where training tasks are deployed elastically over both Cloud and Edge resources, with the purpose of reducing the required network bandwidth, as well as mitigating privacy issues. Based on our proposed data driven V-Model, we introduce a simple yet elegant solution for the AI components development cycle, where prototyping takes place in the cloud according to the Software-in-the-Loop (SiL) paradigm, while deployment and evaluation on the target ECUs (Electronic Control Units) is performed as Hardware-in-the-Loop (HiL) testing. The effectiveness of the proposed framework is demonstrated using two real-world use-cases of AI inference engines for autonomous vehicles, that is, environment perception and most probable path prediction.}, } @article {pmid32974110, year = {2020}, author = {Cheng, CW and Brown, CR and Venugopalan, J and Wang, MD}, title = {Towards an Effective Patient Health Engagement System Using Cloud-Based Text Messaging Technology.}, journal = {IEEE journal of translational engineering in health and medicine}, volume = {8}, number = {}, pages = {2700107}, pmid = {32974110}, issn = {2168-2372}, abstract = {Patient and health provider interaction via text messaging (TM) has become an accepted form of communication, often favored by adolescents and young adults. While integration of TM in disease management has aided health interventions and behavior modifications, broader adoption is hindered by expense, fixed reporting schedules, and monotonic communication. A low-cost, flexible TM reporting system (REMOTES) was developed using inexpensive cloud-based services with features of two-way communication, personalized reporting scheduling, and scalable and secured data storage. REMOTES is a template-based reporting tool adaptable to a wide range of complexity in response formats. In a pilot study, 27 adolescents with sickle cell disease participated to assess feasibility of REMOTES in both inpatient and outpatient settings. Subject compliance with at least one daily self-report pain query was 94.9% (112/118) during inpatient and 91.1% (327/359) during outpatient, with an overall accuracy of 99.2% (970/978). With use of a more complex 8-item questionnaire, 30% (7/21) inpatient and 66.6% (36/54) outpatient responses were reported with 98.1% (51/52) reporting accuracy. All participants expressed high pre-trial expectation (88%) and post-trial satisfaction (89%).
The study suggests that cloud-based text messaging is a feasible and easy-to-use solution for low-cost and personalized patient engagement.}, } @article {pmid32970755, year = {2020}, author = {Wang, X and Qiu, P}, title = {A freight integer linear programming model under fog computing and its application in the optimization of vehicle networking deployment.}, journal = {PloS one}, volume = {15}, number = {9}, pages = {e0239628}, pmid = {32970755}, issn = {1932-6203}, mesh = {City Planning/standards ; *Computer Simulation ; *Internet of Things ; Motor Vehicles/*statistics & numerical data ; Remote Sensing Technology ; }, abstract = {The increase in data volume makes the traditional Internet of Vehicles (IoV) fail to meet users' needs; hence, the IoV has been explored in a series of studies. This study examines the construction of a freight integer linear programming (ILP) model based on fog computing (FG) and analyzes the application of the model in the optimization of the networking deployment (ND) of the IoV. FG and ILP are combined to build a freight computing ILP model. The model is used to analyze the application of ND optimization in the IoV system through simulations. The results show that, across ND scenarios, the model is more suitable for small-scale scenarios and can optimize the objective function; however, its utilization rate is low in large-scale scenarios. In comparisons of network cost and running time, the FG-based ND solution requires less cost and a shorter running time than traditional cloud computing solutions, with apparent effectiveness and efficiency. Therefore, it is found that the FG-based model has low cost, short running time, and apparent efficiency, which provides an experimental basis for the later deployment of freight vehicles (FVs) in Internet of Things (IoT) systems for ND optimization. The results will provide important theoretical support for the overall deployment of the IoV.}, } @article {pmid32969658, year = {2020}, author = {Sun, G and Jin, Y and Li, S and Yang, Z and Shi, B and Chang, C and Abramov, YA}, title = {Virtual Coformer Screening by Crystal Structure Predictions: Crucial Role of Crystallinity in Pharmaceutical Cocrystallization.}, journal = {The journal of physical chemistry letters}, volume = {11}, number = {20}, pages = {8832-8838}, doi = {10.1021/acs.jpclett.0c02371}, pmid = {32969658}, issn = {1948-7185}, mesh = {Acetaminophen/*chemistry ; Computer Simulation ; Crystallization ; Density Functional Theory ; Drug Evaluation, Preclinical ; Indomethacin/*chemistry ; Models, Molecular ; Pharmaceutical Preparations/*chemistry ; Thermodynamics ; }, abstract = {One of the most popular strategies for optimizing drug properties in the pharmaceutical industry is changing a solid form into a cocrystalline form. A number of virtual screening approaches have been previously developed to allow a selection of the most promising cocrystal formers (coformers) for an experimental follow-up. A significant drawback of those methods is related to the lack of accounting for the crystallinity contribution to cocrystal formation. To address this issue, we propose in this study two virtual coformer screening approaches based on a modern cloud-computing crystal structure prediction (CSP) technology at a dispersion-corrected density functional theory (DFT-D) level.
The CSP-based methods were for the first time validated on challenging cases of indomethacin and paracetamol cocrystallization, for which the previously developed approaches provided poor predictions. The calculations demonstrated a dramatic improvement of the virtual coformer screening performance relative to the other methods. It is demonstrated that the crystallinity contribution to the formation of paracetamol and indomethacin cocrystals is a dominant one and, therefore, should not be ignored in the virtual screening calculations. Our results encourage a broad utilization of the proposed CSP-based technology in the pharmaceutical industry as the only virtual coformer screening method that directly accounts for the crystallinity contribution.}, } @article {pmid32968122, year = {2020}, author = {Peter, BG and Messina, JP and Lin, Z and Snapp, SS}, title = {Crop climate suitability mapping on the cloud: a geovisualization application for sustainable agriculture.}, journal = {Scientific reports}, volume = {10}, number = {1}, pages = {15487}, pmid = {32968122}, issn = {2045-2322}, abstract = {Climate change, food security, and environmental sustainability are pressing issues faced by today's global population. As production demands increase and climate threatens crop productivity, agricultural research develops innovative technologies to meet these challenges. Strategies include biodiverse cropping arrangements, new crop introductions, and genetic modification of crop varieties that are resilient to climatic and environmental stressors. Geography in particular is equipped to address a critical question in this pursuit-when and where can crop system innovations be introduced? This manuscript presents a case study of the geographic scaling potential utilizing common bean, delivers an open access Google Earth Engine geovisualization application for mapping the fundamental climate niche of any crop, and discusses food security and legume biodiversity in Sub-Saharan Africa. The application is temporally agile, allowing variable growing season selections and the production of 'living maps' that are continually producible as new data become available. This is an essential communication tool for the future, as practitioners can evaluate the potential geographic range for newly-developed, experimental, and underrepresented crop varieties for facilitating sustainable and innovative agroecological solutions.}, } @article {pmid32967094, year = {2020}, author = {Tahir, A and Chen, F and Khan, HU and Ming, Z and Ahmad, A and Nazir, S and Shafiq, M}, title = {A Systematic Review on Cloud Storage Mechanisms Concerning e-Healthcare Systems.}, journal = {Sensors (Basel, Switzerland)}, volume = {20}, number = {18}, pages = {}, pmid = {32967094}, issn = {1424-8220}, mesh = {*Cloud Computing ; *Information Storage and Retrieval ; Reproducibility of Results ; *Telemedicine ; }, abstract = {As the expenses of medical care services rise and medical professionals become scarce, it is up to healthcare organizations and institutes to consider the implementation of medical Health Information Technology (HIT) innovation frameworks. HIT permits health organizations to streamline their substantial processes and offer services in a more productive and cost-effective way. With the rise of Cloud Storage Computing (CSC), an enormous number of organizations and enterprises have moved their healthcare data sources to distributed storage.
As the information can be requested anytime and anywhere, the availability of information becomes an urgent need. Nonetheless, outages in cloud storage substantially affect the level of availability. Like the other essential attributes of cloud storage (e.g., reliability, performance, security, and privacy), availability also directly impacts the data in cloud storage for e-Healthcare systems. In this paper, we systematically review cloud storage mechanisms concerning the healthcare environment. Additionally, the state-of-the-art cloud storage mechanisms are critically reviewed for e-Healthcare systems based on their characteristics. In short, this paper summarizes the existing literature on cloud storage and its impact on healthcare, and it likewise provides researchers, medical specialists, and organizations with a solid foundation for future studies in the healthcare environment.}, } @article {pmid32966438, year = {2020}, author = {Cerasoli, FT and Sherbert, K and Sławińska, J and Buongiorno Nardelli, M}, title = {Quantum computation of silicon electronic band structure.}, journal = {Physical chemistry chemical physics : PCCP}, volume = {22}, number = {38}, pages = {21816-21822}, doi = {10.1039/d0cp04008h}, pmid = {32966438}, issn = {1463-9084}, abstract = {Development of quantum architectures during the last decade has inspired hybrid classical-quantum algorithms in physics and quantum chemistry that promise simulations of fermionic systems beyond the capability of modern classical computers, even before the era of quantum computing fully arrives. Strong research efforts have been recently made to obtain minimal depth quantum circuits which could accurately represent chemical systems. Here, we show that unprecedented methods used in quantum chemistry, designed to simulate molecules on quantum processors, can be extended to calculate properties of periodic solids. In particular, we present minimal depth circuits implementing the variational quantum eigensolver algorithm and successfully use it to compute the band structure of silicon on a quantum machine for the first time. We are convinced that the presented quantum experiments performed on cloud-based platforms will stimulate more intense studies towards scalable electronic structure computation of advanced quantum materials.}, } @article {pmid32966223, year = {2020}, author = {Whaiduzzaman, M and Hossain, MR and Shovon, AR and Roy, S and Laszka, A and Buyya, R and Barros, A}, title = {A Privacy-Preserving Mobile and Fog Computing Framework to Trace and Prevent COVID-19 Community Transmission.}, journal = {IEEE journal of biomedical and health informatics}, volume = {24}, number = {12}, pages = {3564-3575}, pmid = {32966223}, issn = {2168-2208}, mesh = {COVID-19/*transmission/virology ; Humans ; Mobile Applications ; *Privacy ; SARS-CoV-2/isolation & purification ; }, abstract = {To slow down the spread of COVID-19, governments worldwide are trying to identify infected people and contain the virus by enforcing isolation and quarantine. However, it is difficult to trace people who came into contact with an infected person, which causes widespread community transmission and mass infection. To address this problem, we develop an e-government Privacy-Preserving Mobile and Fog computing framework entitled PPMF that can trace infected and suspected cases nationwide.
We use personal mobile devices with a contact tracing app and two types of stationary fog nodes, named Automatic Risk Checker (ARC) and Suspected User Data Uploader Node (SUDUN), to trace community transmission while maintaining user data privacy. Each user's mobile device receives a Unique Encrypted Reference Code (UERC) when registering on the central application. The mobile device and the central application both generate a Rotational Unique Encrypted Reference Code (RUERC), which is broadcast using Bluetooth Low Energy (BLE) technology. The ARCs are placed at the entry points of buildings, where they can immediately detect if there are positive or suspected cases nearby. If any confirmed case is found, the ARCs broadcast precautionary messages to nearby people without revealing the identity of the infected person. The SUDUNs are placed at the health centers that report test results to the central cloud application. The reported data are later used to map between infected and suspected cases. Therefore, using our proposed PPMF framework, governments can let organizations continue their economic activities without complete lockdown.}, } @article {pmid32965236, year = {2020}, author = {Brown, AP and Randall, SM}, title = {Secure Record Linkage of Large Health Data Sets: Evaluation of a Hybrid Cloud Model.}, journal = {JMIR medical informatics}, volume = {8}, number = {9}, pages = {e18920}, pmid = {32965236}, issn = {2291-9694}, abstract = {BACKGROUND: The linking of administrative data across agencies provides the capability to investigate many health and social issues with the potential to deliver significant public benefit. Despite its advantages, the use of cloud computing resources for linkage purposes is scarce, with the storage of identifiable information on cloud infrastructure assessed as high risk by data custodians.

OBJECTIVE: This study aims to present a model for record linkage that utilizes cloud computing capabilities while assuring custodians that identifiable data sets remain secure and local.

METHODS: A new hybrid cloud model was developed, including privacy-preserving record linkage techniques and container-based batch processing. An evaluation of this model was conducted with a prototype implementation using large synthetic data sets representative of administrative health data.

RESULTS: The cloud model keeps identifiers on premises and uses privacy-preserved identifiers to run all linkage computations on cloud infrastructure. Our prototype used a managed container cluster in Amazon Web Services to distribute the computation using existing linkage software. Although the cost of computation was relatively low, the use of existing software resulted in a processing overhead of 35.7% (149/417 min of execution time).
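The abstract does not specify the privacy-preserving encoding used, but a common record-linkage primitive is keyed (HMAC) hashing of normalized identifiers on premises, so that only non-reversible tokens reach the cloud; the Python sketch below is a generic stand-in under that assumption, not the authors' implementation.

# Keyed hashing of identifiers on premises; only tokens leave the site.
import hmac, hashlib

SECRET_KEY = b"held-by-the-linkage-unit-only"  # never leaves the premises

def preserve(identifier: str) -> str:
    """Deterministic keyed hash: equal identifiers yield equal tokens."""
    norm = identifier.strip().lower()
    return hmac.new(SECRET_KEY, norm.encode(), hashlib.sha256).hexdigest()

# Matching tokens can be compared on cloud infrastructure without exposing
# the underlying names or dates of birth.
print(preserve("Jane Citizen|1980-02-01") == preserve("jane citizen|1980-02-01"))  # True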

CONCLUSIONS: The result of our experimental evaluation shows the operational feasibility of such a model and the exciting opportunities for advancing the analysis of linkage outputs.}, } @article {pmid32960944, year = {2021}, author = {Huang, W and Zheng, P and Cui, Z and Li, Z and Gao, Y and Yu, H and Tang, Y and Yuan, X and Zhang, Z}, title = {MMAP: a cloud computing platform for mining the maximum accuracy of predicting phenotypes from genotypes.}, journal = {Bioinformatics (Oxford, England)}, volume = {37}, number = {9}, pages = {1324-1326}, pmid = {32960944}, issn = {1367-4811}, mesh = {Animals ; Bayes Theorem ; *Cloud Computing ; Genomics ; Genotype ; Humans ; *Models, Genetic ; Phenotype ; Polymorphism, Single Nucleotide ; }, abstract = {UNLABELLED: Accurately predicting phenotypes from genotypes holds great promise to improve health management in humans and animals, and breeding efficiency in animals and plants. Although many prediction methods have been developed, the optimal method differs across datasets due to multiple factors, including species, environments, populations and traits of interest. Studies have demonstrated that the number of genes underlying a trait and its heritability are the two key factors that determine which method fits the trait the best. In many cases, however, these two factors are unknown for the traits of interest. We developed a cloud computing platform for Mining the Maximum Accuracy of Predicting phenotypes from genotypes (MMAP) using unsupervised learning on publicly available real data and simulated data. MMAP provides a user interface to upload input data, manage projects and analyses and download the output results. The platform is free for the public to conduct computations for predicting phenotypes and genetic merit using the best prediction method optimized from many available ones, including Ridge Regression, gBLUP, compressed BLUP, Bayesian LASSO, Bayes A, B, Cpi and many more. Users can also use the platform to conduct data analyses with any methods of their choice. It is expected that extensive usage of MMAP would enrich the training data, which in turn results in continual improvement of the identification of the best method for use with particular traits.
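As a small worked example of one method in MMAP's search space, the following sketch cross-validates ridge regression on a synthetic genotype matrix; the marker counts, effect sparsity, and alpha value are invented for illustration and say nothing about MMAP's internals.

# Cross-validated ridge regression on synthetic genotypes (allele counts 0/1/2).
import numpy as np
from sklearn.linear_model import Ridge
from sklearn.model_selection import cross_val_score

rng = np.random.default_rng(1)
n_individuals, n_markers = 200, 1_000
X = rng.integers(0, 3, size=(n_individuals, n_markers)).astype(float)
# Sparse marker effects: roughly 5% of markers influence the trait.
true_effects = rng.normal(0, 0.1, size=n_markers) * (rng.random(n_markers) < 0.05)
y = X @ true_effects + rng.normal(0, 1.0, size=n_individuals)  # signal + noise

scores = cross_val_score(Ridge(alpha=10.0), X, y, cv=5, scoring="r2")
print(round(scores.mean(), 3))  # prediction accuracy for this method/trait pair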

The MMAP user manual, tutorials and example datasets are available at http://zzlab.net/MMAP.

SUPPLEMENTARY INFORMATION: Supplementary data are available at Bioinformatics online.}, } @article {pmid32960888, year = {2020}, author = {Ashraf, I and Umer, M and Majeed, R and Mehmood, A and Aslam, W and Yasir, MN and Choi, GS}, title = {Home automation using general purpose household electric appliances with Raspberry Pi and commercial smartphone.}, journal = {PloS one}, volume = {15}, number = {9}, pages = {e0238480}, pmid = {32960888}, issn = {1932-6203}, mesh = {Air Conditioning ; Automation/*instrumentation/*methods ; Computers ; Electrical Equipment and Supplies ; Electricity ; Humans ; Smartphone ; Software ; }, abstract = {This study presents the design and implementation of a home automation system that focuses on the use of ordinary electrical appliances for remote control using Raspberry Pi and relay circuits and does not use expensive IP-based devices. Common Lights, Heating, Ventilation, and Air Conditioning (HVAC), fans, and other electronic devices are among the appliances that can be used in this system. A smartphone app is designed that helps the user map the designed smart home onto his actual home via an easy and interactive drag & drop option. The system provides control over the appliances via both the local network and remote access. Data logging over the Microsoft Azure cloud database ensures system recovery in case of gateway failure and data records for later use. Periodic notifications also help the user to optimize the usage of home appliances. Moreover, the user can set his preferences and the appliances are automatically turned off and on to meet user-specific requirements. Raspberry Pi acting as the server maintains the database of each appliance. An HTTP web interface and an Apache server are used for communication between the Android app and the Raspberry Pi. With a 5 V relay circuit and the Raspberry Pi microprocessor, the proposed system is low-cost, energy-efficient, easy to operate, and affordable for low-income houses.}, } @article {pmid32952600, year = {2020}, author = {Huang, PJ and Chang, JH and Lin, HH and Li, YX and Lee, CC and Su, CT and Li, YL and Chang, MT and Weng, S and Cheng, WH and Chiu, CH and Tang, P}, title = {DeepVariant-on-Spark: Small-Scale Genome Analysis Using a Cloud-Based Computing Framework.}, journal = {Computational and mathematical methods in medicine}, volume = {2020}, number = {}, pages = {7231205}, pmid = {32952600}, issn = {1748-6718}, mesh = {*Cloud Computing/economics ; Computational Biology/methods ; Cost-Benefit Analysis ; *Deep Learning ; *Genetic Variation ; Genome, Human ; High-Throughput Nucleotide Sequencing/economics/standards/statistics & numerical data ; Humans ; Neural Networks, Computer ; Software ; Whole Genome Sequencing/economics/standards/*statistics & numerical data ; }, abstract = {Although sequencing a human genome has become affordable, identifying genetic variants from whole-genome sequence data is still a hurdle for researchers without adequate computing equipment or bioinformatics support. GATK is a gold standard method for the identification of genetic variants and has been widely used in genome projects and population genetic studies for many years. This was until the Google Brain team developed a new method, DeepVariant, which utilizes deep neural networks to construct an image classification model to identify genetic variants. However, the superior accuracy of DeepVariant comes at the cost of computational intensity, largely constraining its applications.
Accordingly, we present DeepVariant-on-Spark to optimize resource allocation, enable multi-GPU support, and accelerate the processing of the DeepVariant pipeline. To make DeepVariant-on-Spark more accessible to everyone, we have deployed DeepVariant-on-Spark on the Google Cloud Platform (GCP). Users can deploy DeepVariant-on-Spark on the GCP following our instructions within 20 minutes and start to analyze at least ten whole-genome sequencing datasets using free credits provided by the GCP. DeepVariant-on-Spark is freely available for small-scale genome analysis using a cloud-based computing framework, which is suitable for pilot testing or preliminary study, while reserving the flexibility and scalability for large-scale sequencing projects.}, } @article {pmid32947907, year = {2020}, author = {Silva, LAZD and Vidal, VF and Honório, LM and Dantas, MAR and Pinto, MF and Capretz, M}, title = {A Heterogeneous Edge-Fog Environment Supporting Digital Twins for Remote Inspections.}, journal = {Sensors (Basel, Switzerland)}, volume = {20}, number = {18}, pages = {}, pmid = {32947907}, issn = {1424-8220}, support = {PD-02651-0013/2017//TBE, EDP and ANEEL - The Brazilian Regullaroty Agency of Electricity/ ; }, abstract = {The increase in the development of digital twins brings several advantages to inspection and maintenance, but also new challenges. Digital models capable of representing real equipment for full remote inspection demand the synchronization, integration, and fusion of several sensors and methodologies such as stereo vision, monocular Simultaneous Localization and Mapping (SLAM), laser and RGB-D camera readings, texture analysis, filters, thermal, and multi-spectral images. This multidimensional information makes it possible to have a full understanding of given equipment, enabling remote diagnosis. To solve this problem, the present work uses an edge-fog-cloud architecture running over a publisher-subscriber communication framework to optimize the computational costs and throughput. In this approach, each process is embedded in an edge node responsible for preprocessing a given amount of data in a way that optimizes the trade-off between processing capabilities and throughput delays. All information is integrated with different levels of fog nodes and a cloud server to maximize performance. To demonstrate this proposal, a real-time 3D reconstruction problem using moving cameras is shown. In this scenario, stereo and RGB-D cameras run over edge nodes, filtering and preprocessing the initial data. Furthermore, the point cloud and image registration, odometry, and filtering run over fog clusters. A cloud server is responsible for texturing and processing the final results. This approach enables us to optimize the time lag between data acquisition and operator visualization, and it is easily scalable if new sensors and algorithms must be added.
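The publisher-subscriber backbone of such an edge-fog-cloud pipeline can be reduced to a single-process Python sketch; the Broker class, topic names, and payloads below are illustrative stand-ins, since a real deployment would use a message broker across machines rather than an in-memory dictionary.

# Minimal in-process publisher-subscriber dispatcher (toy sketch).
from collections import defaultdict
from typing import Callable

class Broker:
    def __init__(self):
        self._subs: dict[str, list[Callable]] = defaultdict(list)

    def subscribe(self, topic: str, handler: Callable) -> None:
        self._subs[topic].append(handler)

    def publish(self, topic: str, payload) -> None:
        for handler in self._subs[topic]:
            handler(payload)

broker = Broker()
# A "fog" node consumes pre-processed edge readings for registration.
broker.subscribe("edge/rgbd/filtered", lambda pts: print(f"fog: registering {len(pts)} points"))
# An "edge" node publishes a (toy) filtered point cloud.
broker.publish("edge/rgbd/filtered", [(0.1, 0.2, 0.3), (0.4, 0.5, 0.6)])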
The experimental results demonstrate precision, by comparison with ground-truth data; scalability, by the addition of further readings; and overall performance.}, } @article {pmid32943798, year = {2020}, author = {Moreno-Martínez, Á and Izquierdo-Verdiguier, E and Maneta, MP and Camps-Valls, G and Robinson, N and Muñoz-Marí, J and Sedano, F and Clinton, N and Running, SW}, title = {Multispectral high resolution sensor fusion for smoothing and gap-filling in the cloud.}, journal = {Remote sensing of environment}, volume = {247}, number = {}, pages = {111901}, pmid = {32943798}, issn = {0034-4257}, support = {80NSSC18M0025/ImNASA/Intramural NASA/United States ; }, abstract = {Remote sensing optical sensors onboard operational satellites cannot have high spectral, spatial and temporal resolutions simultaneously. In addition, clouds and aerosols can adversely affect the signal contaminating the land surface observations. We present a HIghly Scalable Temporal Adaptive Reflectance Fusion Model (HISTARFM) algorithm to combine multispectral images of different sensors to reduce noise and produce monthly gap free high resolution (30 m) observations over land. Our approach uses images from the Landsat (30 m spatial resolution and 16 day revisit cycle) and the MODIS missions, both from Terra and Aqua platforms (500 m spatial resolution and daily revisit cycle). We implement a bias-aware Kalman filter method in the Google Earth Engine (GEE) platform to obtain fused images at the Landsat spatial-resolution. The added bias correction in the Kalman filter estimates accounts for the fact that both model and observation errors are temporally auto-correlated and may have a non-zero mean. This approach also enables reliable estimation of the uncertainty associated with the final reflectance estimates, allowing for error propagation analyses in higher level remote sensing products. Quantitative and qualitative evaluations of the generated products through comparison with other state-of-the-art methods confirm the validity of the approach, and open the door to operational applications at enhanced spatio-temporal resolutions at broad continental scales.}, } @article {pmid32942759, year = {2020}, author = {Platt, S and Sanabria-Russo, L and Oliver, M}, title = {CoNTe: A Core Network Temporal Blockchain for 5G.}, journal = {Sensors (Basel, Switzerland)}, volume = {20}, number = {18}, pages = {}, pmid = {32942759}, issn = {1424-8220}, support = {AEI/FEDER TEC2016-79510//Generalitat de Catalunya/ ; GR2017-2019//Generalitat de Catalunya/ ; }, abstract = {Virtual Network Functions allow the effective separation between hardware and network functionality, a strong paradigm shift from previously tightly integrated monolithic, vendor, and technology dependent deployments. In this virtualized paradigm, all aspects of network operations can be made to deploy on demand, dynamically scale, as well as be shared and interworked in ways that mirror behaviors of general cloud computing. To date, despite rising demand, distributed ledger technology remains largely incompatible with such elastic deployments, owing to its nature as an immutable record store.
This work focuses on the structural incompatibility of current blockchain designs and proposes a novel, temporal blockchain design built atop federated Byzantine agreement, which can dynamically scale and be packaged as a Virtual Network Function (VNF) for the 5G Core.}, } @article {pmid32940684, year = {2020}, author = {Mayfield, CA and Gigler, ME and Snapper, L and Jose, J and Tynan, J and Scott, VC and Dulin, M}, title = {Using cloud-based, open-source technology to evaluate, improve, and rapidly disseminate community-based intervention data.}, journal = {Journal of the American Medical Informatics Association : JAMIA}, volume = {27}, number = {11}, pages = {1741-1746}, pmid = {32940684}, issn = {1527-974X}, mesh = {*Cloud Computing ; Community Health Services/*organization & administration ; Data Collection ; *Data Management ; Databases, Factual ; Humans ; Information Dissemination/*methods ; North Carolina ; Ownership ; Pilot Projects ; Social Determinants of Health ; Software ; *Stakeholder Participation ; }, abstract = {Building Uplifted Families (BUF) is a cross-sector community initiative to improve health and economic disparities in Charlotte, North Carolina. A formative evaluation strategy was used to support iterative process improvement and collaborative engagement of cross-sector partners. To address challenges with electronic data collection through REDCap Cloud, we developed the BUF Rapid Dissemination (BUF-RD) model, a multistage data governance system supplemented by open-source technologies: Stage 1) data collection; Stage 2) data integration and analysis; and Stage 3) dissemination. In Stage 3, results were disseminated through an interactive dashboard developed in RStudio using RShiny and Shiny Server solutions. The BUF-RD model was successfully deployed in a 6-month beta test to reduce the time lapse between data collection and dissemination from 3 months to 2 weeks. Having up-to-date preliminary results led to improved BUF implementation, enhanced stakeholder engagement, and greater responsiveness and alignment of program resources to specific participant needs.}, } @article {pmid32939771, year = {2020}, author = {Mehraeen, M and Dadkhah, M and Mehraeen, A}, title = {Investigating the capabilities of information technologies to support policymaking in COVID-19 crisis management; a systematic review and expert opinions.}, journal = {European journal of clinical investigation}, volume = {50}, number = {11}, pages = {e13391}, doi = {10.1111/eci.13391}, pmid = {32939771}, issn = {1365-2362}, mesh = {COVID-19 ; Coronavirus Infections/epidemiology/*prevention & control ; Female ; Health Policy ; Humans ; Information Technology/*statistics & numerical data ; Internet ; Machine Learning ; Male ; *Outcome Assessment, Health Care ; Pandemics/*prevention & control/statistics & numerical data ; Pneumonia, Viral/epidemiology/*prevention & control ; Policy Making ; Social Media ; }, abstract = {BACKGROUND: Today, numerous countries are fighting to protect themselves against the Covid-19 crisis, while policymakers are confounded and empty-handed in dealing with this chaotic circumstance. The infection and its impacts have made it difficult to make optimal and suitable decisions. New information technologies play significant roles in such critical situations to address and relieve stress during the coronavirus crisis. 
This article endeavours to identify the challenges policymakers typically experience during pandemics, including Covid-19, and, accordingly, the new information technology capabilities available to confront them.

MATERIAL AND METHODS: The current study synthesizes expert opinion within a systematic review process to identify the best available evidence drawn from text and opinion, offering practical guidance for policymakers.

RESULTS: The results illustrate that the challenges fall into two categories: confronting the disease and mitigating its consequences. Furthermore, the Internet of Things, cloud computing, machine learning and social networking play the most significant roles in addressing these challenges.}, } @article {pmid32938391, year = {2020}, author = {Albrecht, B and Bağcı, C and Huson, DH}, title = {MAIRA- real-time taxonomic and functional analysis of long reads on a laptop.}, journal = {BMC bioinformatics}, volume = {21}, number = {Suppl 13}, pages = {390}, pmid = {32938391}, issn = {1471-2105}, mesh = {Classification/*methods ; Computers/*standards ; Humans ; Metagenomics/*methods ; }, abstract = {BACKGROUND: Advances in mobile sequencing devices and laptop performance make metagenomic sequencing and analysis in the field a technologically feasible prospect. However, metagenomic analysis pipelines are usually designed to run on servers and in the cloud.

RESULTS: MAIRA is a new standalone program for interactive taxonomic and functional analysis of long read metagenomic sequencing data on a laptop, without requiring external resources. The program performs fast, online, genus-level analysis, and on-demand, detailed taxonomic and functional analysis. It uses two levels of frame-shift-aware alignment of DNA reads against protein reference sequences, and then performs detailed analysis using a protein synteny graph.

CONCLUSIONS: We envision this software being used by researchers in the field, when access to servers or cloud facilities is difficult, or by individuals who do not routinely access such facilities, such as medical researchers, crop scientists, or teachers.}, } @article {pmid32937865, year = {2020}, author = {Koubaa, A and Ammar, A and Alahdab, M and Kanhouch, A and Azar, AT}, title = {DeepBrain: Experimental Evaluation of Cloud-Based Computation Offloading and Edge Computing in the Internet-of-Drones for Deep Learning Applications.}, journal = {Sensors (Basel, Switzerland)}, volume = {20}, number = {18}, pages = {}, pmid = {32937865}, issn = {1424-8220}, abstract = {Unmanned Aerial Vehicles (UAVs) have been very effective in collecting aerial image data for various Internet-of-Things (IoT)/smart cities applications such as search and rescue, surveillance, vehicle detection and counting, and intelligent transportation systems, to name a few. However, the real-time processing of collected data on the edge in the context of the Internet-of-Drones remains an open challenge because UAVs have limited energy capabilities, while computer vision techniques consume excessive energy and require abundant resources. This fact is even more critical when deep learning algorithms, such as convolutional neural networks (CNNs), are used for classification and detection. In this paper, we first propose a system architecture of computation offloading for Internet-connected drones. Then, we conduct a comprehensive experimental study to evaluate the performance in terms of energy, bandwidth, and delay of the cloud computation offloading approach versus the edge computing approach for deep learning applications in the context of UAVs. In particular, we investigate the tradeoff between the communication cost and the computation of the two candidate approaches experimentally. The main results demonstrate that the computation offloading approach allows us to provide much higher throughput (i.e., frames per second) as compared to the edge computing approach, despite the larger communication delays.}, } @article {pmid32934216, year = {2020}, author = {Wang, S and Di Tommaso, S and Deines, JM and Lobell, DB}, title = {Mapping twenty years of corn and soybean across the US Midwest using the Landsat archive.}, journal = {Scientific data}, volume = {7}, number = {1}, pages = {307}, pmid = {32934216}, issn = {2052-4463}, abstract = {Field-level monitoring of crop types in the United States via the Cropland Data Layer (CDL) has played an important role in improving production forecasts and enabling large-scale study of agricultural inputs and outcomes. Although CDL offers crop type maps across the conterminous US from 2008 onward, such maps are missing in many Midwestern states or are uneven in quality before 2008. To fill these data gaps, we used the now-public Landsat archive and cloud computing services to map corn and soybean at 30 m resolution across the US Midwest from 1999-2018. Our training data were CDL from 2008-2018, and we validated the predictions on CDL 1999-2007 where available, county-level crop acreage statistics, and state-level crop rotation statistics.
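For the corn-soybean mapping entry above, a hedged Earth Engine Python sketch of the general train-on-CDL workflow might look as follows; the dataset IDs are current GEE catalog names, the area of interest is hypothetical, and the authors' actual predictors and classifier may differ.

```python
# Hedged sketch: train a classifier on CDL labels over a Landsat composite
# in Google Earth Engine. AOI, year, and classifier choice are illustrative.
import ee
ee.Initialize()

region = ee.Geometry.Rectangle([-94.0, 41.0, -93.0, 42.0])  # hypothetical AOI
landsat = (ee.ImageCollection('LANDSAT/LT05/C02/T1_L2')
           .filterBounds(region)
           .filterDate('2008-05-01', '2008-09-30')
           .median())
cdl = ee.Image('USDA/NASS/CDL/2008').select('cropland')

# Sample pixels where CDL provides crop-type labels.
training = landsat.addBands(cdl).sample(region=region, scale=30, numPixels=5000)
classifier = ee.Classifier.smileRandomForest(100).train(
    features=training,
    classProperty='cropland',
    inputProperties=landsat.bandNames())
classified = landsat.classify(classifier)  # export or inspect as needed
```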
The corn-soybean maps, which we call the Corn-Soy Data Layer (CSDL), are publicly hosted on Google Earth Engine and also available for download online.}, } @article {pmid32927672, year = {2020}, author = {Utomo, D and Hsiung, PA}, title = {A Multitiered Solution for Anomaly Detection in Edge Computing for Smart Meters.}, journal = {Sensors (Basel, Switzerland)}, volume = {20}, number = {18}, pages = {}, pmid = {32927672}, issn = {1424-8220}, abstract = {In systems connected to smart grids, smart meters with fast and efficient responses are very helpful in detecting anomalies in real time. However, sending data at a frequency of a minute or less is impractical with today's technology because of communication-network and storage bottlenecks. Because mitigation cannot be done in real time, we propose prediction techniques using Deep Neural Network (DNN), Support Vector Regression (SVR), and k-Nearest Neighbors (KNN). In addition to these techniques, the prediction timestep is chosen per day and wrapped in sliding windows, and clustering using K-means, as well as the intersection of K-means and HDBSCAN, is also evaluated. The predictive ability applied here is to predict whether anomalies in electricity usage will occur in the next few weeks. The aim is to give users time to check their usage and, on the utility side, to determine whether a sufficient supply must be prepared. We also propose a latency reduction to counter the higher latency of traditional centralized systems by adding an Edge Meter Data Management System (MDMS) layer and a Cloud-MDMS layer as the inference and training models. Based on experiments running on a Raspberry Pi, the best solution is the DNN, which at 128 timesteps has the shortest latency (1.25 ms) and a 159 kB persistent file size.}, } @article {pmid32906056, year = {2020}, author = {Mrozek, D}, title = {A review of Cloud computing technologies for comprehensive microRNA analyses.}, journal = {Computational biology and chemistry}, volume = {88}, number = {}, pages = {107365}, doi = {10.1016/j.compbiolchem.2020.107365}, pmid = {32906056}, issn = {1476-928X}, mesh = {Big Data ; *Cloud Computing ; Humans ; *Machine Learning ; MicroRNAs/*analysis ; }, abstract = {Cloud computing revolutionized many fields that require ample computational power. Cloud platforms may also provide huge support for microRNA analysis, mainly through disclosing scalable resources of different types. In Clouds, these resources are available as services, which simplifies their allocation and releasing. This feature is especially useful during the analysis of large volumes of data, like the one produced by next generation sequencing experiments, which require not only extended storage space but also a distributed computing environment. In this paper, we show which of the Cloud properties and service models can be especially beneficial for microRNA analysis. We also explain the most useful services of the Cloud (including storage space, computational power, web application hosting, machine learning models, and Big Data frameworks) that can be used for microRNA analysis.
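For the smart-meter entry above (pmid32927672), the per-day sliding-window framing that feeds the DNN/SVR/KNN predictors can be sketched as below; the window length and horizon are illustrative choices, not the paper's settings.

```python
# Sketch of per-day sliding windows for usage prediction; window and
# horizon values are illustrative assumptions.
import numpy as np

def sliding_windows(daily_usage, window=28, horizon=14):
    """Turn a 1-D series of daily consumption into (X, y) pairs:
    each X row holds `window` past days, each y the usage `horizon` days ahead."""
    X, y = [], []
    for start in range(len(daily_usage) - window - horizon + 1):
        X.append(daily_usage[start:start + window])
        y.append(daily_usage[start + window + horizon - 1])
    return np.array(X), np.array(y)

usage = np.random.rand(365)       # hypothetical one-year daily series
X, y = sliding_windows(usage)     # feed X, y to a DNN, SVR, or KNN regressor
```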
At the same time, we review several Cloud-based solutions for microRNA analysis and show that utilization of the Cloud in this field is still limited, but may increase in the future as awareness of its applicability grows.}, } @article {pmid32904507, year = {2020}, author = {Long, E and Chen, J and Wu, X and Liu, Z and Wang, L and Jiang, J and Li, W and Zhu, Y and Chen, C and Lin, Z and Li, J and Li, X and Chen, H and Guo, C and Zhao, L and Nie, D and Liu, X and Liu, X and Dong, Z and Yun, B and Wei, W and Xu, F and Lv, J and Li, M and Ling, S and Zhong, L and Chen, J and Zheng, Q and Zhang, L and Xiang, Y and Tan, G and Huang, K and Xiang, Y and Lin, D and Zhang, X and Dongye, M and Wang, D and Chen, W and Liu, X and Lin, H and Liu, Y}, title = {Artificial intelligence manages congenital cataract with individualized prediction and telehealth computing.}, journal = {NPJ digital medicine}, volume = {3}, number = {}, pages = {112}, pmid = {32904507}, issn = {2398-6352}, abstract = {A challenge of chronic diseases that remains to be solved is how to liberate patients and medical resources from the burdens of long-term monitoring and periodic visits. Precise management based on artificial intelligence (AI) holds great promise; however, a clinical application that fully integrates prediction and telehealth computing has not been achieved, and further efforts are required to validate its real-world benefits. Taking congenital cataract as a representative, we used Bayesian and deep-learning algorithms to create CC-Guardian, an AI agent that incorporates individualized prediction and scheduling, and intelligent telehealth follow-up computing. Our agent exhibits high sensitivity and specificity in both internal and multi-resource validation. We integrate our agent with a web-based smartphone app and prototype a prediction-telehealth cloud platform to support our intelligent follow-up system. We then conduct a retrospective self-controlled test validating that our system not only accurately detects and addresses complications at earlier stages, but also reduces the socioeconomic burdens compared to conventional methods. This study represents a pioneering step in applying AI to achieve real medical benefits and demonstrates a novel strategy for the effective management of chronic diseases.}, } @article {pmid32878202, year = {2020}, author = {Hwang, YW and Lee, IY}, title = {A Study on CP-ABE-based Medical Data Sharing System with Key Abuse Prevention and Verifiable Outsourcing in the IoMT Environment.}, journal = {Sensors (Basel, Switzerland)}, volume = {20}, number = {17}, pages = {}, pmid = {32878202}, issn = {1424-8220}, abstract = {Recent developments in cloud computing allow data to be securely shared between users. This can be used to improve the quality of life of patients and medical staff in the Internet of Medical Things (IoMT) environment. However, in the IoMT cloud environment, there are various security threats to the patient's medical data. As a result, security features such as encryption of collected data and access control by legitimate users are essential. Many studies have been conducted on access control techniques using ciphertext-policy attribute-based encryption (CP-ABE), a form of attribute-based encryption, among various security technologies, and studies are underway to apply them to the medical field. However, several problems persist. First, as the secret key does not identify the user, the user may maliciously distribute the secret key, and such users cannot be tracked. 
Second, Attribute-Based Encryption (ABE) increases the size of the ciphertext with the number of attributes specified; this wastes cloud storage and makes decryption computationally expensive, so users must employ outsourcing servers. Third, a verification process is needed to prove that the results computed on the outsourcing server are properly computed. This paper focuses on the IoMT environment for a study of a CP-ABE-based medical data sharing system with key abuse prevention and verifiable outsourcing in a cloud environment. The proposed scheme can protect the privacy of user data stored in a cloud environment in the IoMT field, and if there is a problem with the secret key delegated by the user, it can trace the user who first delegated the key, preventing the key abuse problem. In addition, this scheme reduces the user's burden when decrypting ciphertext and calculates accurate results through a server that supports constant-sized ciphertext output and verifiable outsourcing technology. The goal of this paper is to propose a system that enables patients and medical staff to share medical data safely and efficiently in an IoMT environment.}, } @article {pmid32876754, year = {2020}, author = {Nguyen, UNT and Pham, LTH and Dang, TD}, title = {Correction to: an automatic water detection approach using Landsat 8 OLI and Google earth engine cloud computing to map lakes and reservoirs in New Zealand.}, journal = {Environmental monitoring and assessment}, volume = {192}, number = {9}, pages = {616}, doi = {10.1007/s10661-020-08581-y}, pmid = {32876754}, issn = {1573-2959}, abstract = {In the published article:"An automatic water detection approach using Landsat 8 OLI and Google Earth Engine cloud computing to map lakes and reservoirs in New Zealand", the Acknowledgements was published incorrectly and funding statement was missing.}, } @article {pmid32868955, year = {2020}, author = {Mei, L and Rozanov, V and Burrows, JP}, title = {A fast and accurate radiative transfer model for aerosol remote sensing.}, journal = {Journal of quantitative spectroscopy & radiative transfer}, volume = {256}, number = {}, pages = {107270}, pmid = {32868955}, issn = {0022-4073}, abstract = {After several decades of development of retrieval techniques in aerosol remote sensing, no fast and accurate analytical Radiative Transfer Model (RTM) has been developed and applied to create global aerosol products for non-polarimetric instruments such as Ocean and Land Colour Instrument/Sentinel-3 (OLCI/Sentinel-3) and Meteosat Second Generation/Spinning Enhanced Visible and Infrared Imager (MSG/SEVIRI). Global aerosol retrieval algorithms are typically based on a Look-Up-Table (LUT) technique, requiring high-performance computers. The current eXtensible Bremen Aerosol/cloud and surfacE parameters Retrieval (XBAER) algorithm also utilizes the LUT method. In order to have a near-real-time retrieval and achieve a quick and accurate "FIRST-LOOK" aerosol product without a high demand on computing resources, we have developed a Fast and Accurate Semi-analytical Model of Atmosphere-surface Reflectance (FASMAR) for aerosol remote sensing. FASMAR is developed based on a successive order of scattering technique. In FASMAR, the first three orders of scattering are calculated exactly. The contribution of higher orders of scattering is estimated using an extrapolation technique and an additional correction function. 
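For the FASMAR entry above, the abstract's "first three orders exact, higher orders extrapolated" scheme admits a simple geometric-tail reading; the following LaTeX block is only one plausible form, since the paper's actual extrapolation and correction function are not given here:

```latex
% One plausible geometric-tail extrapolation (illustrative only; FASMAR's
% actual extrapolation and correction function are defined in the paper).
\begin{equation*}
  R \;=\; \sum_{n=1}^{\infty} R_n
    \;\approx\; R_1 + R_2 + R_3
      \;+\; \underbrace{R_3\,\frac{q}{1-q}}_{\text{geometric tail}}\;
        c(\mu_0,\mu,\tau),
  \qquad q = \frac{R_3}{R_2},
\end{equation*}
```

where R_n is the reflectance contribution of the n-th scattering order, q < 1 when successive orders decay, and c is a correction function of the illumination/viewing geometry and optical thickness.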
The evaluation of FASMAR has been performed by comparison with the radiative transfer model SCIATRAN for all typical observation/illumination geometries, surface/aerosol conditions, and the wavelengths (412, 550, 670, 870, 1600, and 2100 nm) used for aerosol remote sensing. The selected observation/illumination conditions are based on the observations from both geostationary satellites (e.g. MSG/SEVIRI) and polar-orbit satellites (e.g. OLCI/Sentinel-3). The percentage error of the top-of-atmosphere reflectance calculated by FASMAR is within ± 3% for typical polar-orbit/geostationary satellites' observation/illumination geometries. The accuracy decreases for solar and viewing zenith angles larger than 70[∘]. However, even in such cases, the error is within the range ± 5%. The evaluation of model performance also shows that FASMAR can be used for all typical surfaces with albedo in the interval [0, 1] and aerosols with optical thickness in the range [0.01, 1].}, } @article {pmid32863440, year = {2020}, author = {Wang, X and Xiao, X and Zou, Z and Chen, B and Ma, J and Dong, J and Doughty, RB and Zhong, Q and Qin, Y and Dai, S and Li, X and Zhao, B and Li, B}, title = {Tracking annual changes of coastal tidal flats in China during 1986-2016 through analyses of Landsat images with Google Earth Engine.}, journal = {Remote sensing of environment}, volume = {238}, number = {}, pages = {}, pmid = {32863440}, issn = {0034-4257}, support = {R01 AI101028/AI/NIAID NIH HHS/United States ; }, abstract = {Tidal flats (non-vegetated area), along with coastal vegetation area, constitute the coastal wetlands (intertidal zone) between high and low water lines, and play an important role in wildlife, biodiversity and biogeochemical cycles. However, accurate annual maps of coastal tidal flats over the last few decades are unavailable and their spatio-temporal changes in China are unknown. In this study, we analyzed all the available Landsat TM/ETM+/OLI imagery (~ 44,528 images) using the Google Earth Engine (GEE) cloud computing platform and a robust decision tree algorithm to generate annual frequency maps of open surface water body and vegetation to produce annual maps of coastal tidal flats in eastern China from 1986 to 2016 at 30-m spatial resolution. The resulting map of coastal tidal flats in 2016 was evaluated using very high-resolution images available in Google Earth. The total area of coastal tidal flats in China in 2016 was about 731,170 ha, mostly distributed in the provinces around Yellow River Delta and Pearl River Delta. The interannual dynamics of coastal tidal flats area in China over the last three decades can be divided into three periods: a stable period during 1986-1992, an increasing period during 1993-2001 and a decreasing period during 2002-2016. 
The resulting annual coastal tidal flats maps could be used to support sustainable coastal zone management policies that preserve coastal ecosystem services and biodiversity in China.}, } @article {pmid32857770, year = {2020}, author = {Samea, F and Azam, F and Rashid, M and Anwar, MW and Haider Butt, W and Muzaffar, AW}, title = {A model-driven framework for data-driven applications in serverless cloud computing.}, journal = {PloS one}, volume = {15}, number = {8}, pages = {e0237317}, pmid = {32857770}, issn = {1932-6203}, mesh = {*Cloud Computing ; Data Science/*methods ; *Models, Theoretical ; *Software ; }, abstract = {In a serverless cloud computing environment, the cloud provider dynamically manages the allocation of resources whereas the developers purely focus on their applications. The data-driven applications in serverless cloud computing mainly address the web as well as other distributed scenarios, and therefore, it is essential to offer a consistent user experience across different connection types. In order to address the issues of data-driven applications in a real-time distributed environment, the use of GraphQL (Graph Query Language) is gaining more and more popularity in state-of-the-art cloud computing approaches. However, the existing solutions target the low-level implementation of GraphQL for the development of complex data-driven applications, which may lead to several errors and involve a significant amount of development effort due to various users' requirements in real time. Therefore, it is critical to simplify the development process of data-driven applications in a serverless cloud computing environment. Consequently, this research introduces UMLPDA (Unified Modeling Language Profile for Data-driven Applications), which adopts the concepts of UML-based Model-driven Architectures to model the frontend as well as the backend requirements for data-driven applications developed at a higher abstraction level. Particularly, a modeling approach is proposed to resolve the development complexities such as data communication and synchronization. Subsequently, a complete open source transformation engine is developed using a Model-to-Text approach to automatically generate the frontend as well as backend low-level implementations of Angular2 and GraphQL respectively. The validation of the proposed work is performed with three different case studies, deployed on the Amazon Web Services platform. The results show that the proposed framework enables data-driven applications to be developed with simplicity.}, } @article {pmid32857296, year = {2020}, author = {Fuentes, H and Mauricio, D}, title = {Smart water consumption measurement system for houses using IoT and cloud computing.}, journal = {Environmental monitoring and assessment}, volume = {192}, number = {9}, pages = {602}, pmid = {32857296}, issn = {1573-2959}, mesh = {Algorithms ; *Cloud Computing ; *Drinking ; Environmental Monitoring ; }, abstract = {Presently, in several parts of the world, water consumption is not measured or visualized in real time; in addition, water leaks are not detected promptly or with high precision, generating unnecessary waste of water. 
This article therefore presents the implementation of a smart water-consumption measurement system built on a highly decoupled architecture that integrates various technologies, allowing consumption to be visualized in real time. In addition, a leak-detection algorithm is proposed based on rules, historical context, and user location, which covers 10 possible water-consumption scenarios ranging from normal to anomalous consumption. The system allows data to be collected by a smart meter, preprocessed by a local server (gateway), and periodically sent to the Cloud to be analyzed by the leak-detection algorithm and, simultaneously, viewed on a web interface. The results show that the algorithm achieves 100% accuracy, recall, precision, and F1 score in detecting leaks, far better than other procedures, with a margin of error of 4.63% in the amount of water consumed.}, } @article {pmid32852146, year = {2020}, author = {Pang, R and Wei, Z and Liu, W and Chen, Z and Cheng, X and Zhang, H and Li, G and Liu, L}, title = {Influence of the pandemic dissemination of COVID-19 on facial rejuvenation: A survey of Twitter.}, journal = {Journal of cosmetic dermatology}, volume = {19}, number = {11}, pages = {2778-2784}, pmid = {32852146}, issn = {1473-2165}, support = {20A320033//The University key scientific research project of Henan Province/ ; }, mesh = {*Betacoronavirus ; COVID-19 ; Coronavirus Infections/*epidemiology ; *Cosmetic Techniques ; Face ; Humans ; Pandemics ; Pneumonia, Viral/*epidemiology ; *Public Opinion ; *Rejuvenation ; SARS-CoV-2 ; *Social Media ; }, abstract = {BACKGROUND: With the pandemic dissemination of COVID-19, attitudes and sentiment surrounding facial rejuvenation have evolved rapidly.
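For the smart water-measurement entry above, a rule-based check combining rules, historical context, and user location could be sketched as follows; both the rules and the thresholds are hypothetical, not the paper's ten scenarios.

```python
# Hedged sketch of a rule-based leak check; rules and thresholds are
# hypothetical illustrations, not the cited system's actual scenarios.
from dataclasses import dataclass

@dataclass
class Reading:
    flow_lpm: float      # instantaneous flow, litres/minute
    hour: int            # hour of day, 0-23
    user_home: bool      # derived from user location context

def is_suspected_leak(r: Reading, nightly_baseline_lpm: float) -> bool:
    # Rule 1: sustained flow while nobody is home.
    if r.flow_lpm > 0.5 and not r.user_home:
        return True
    # Rule 2: night-time flow well above the historical baseline.
    if 0 <= r.hour < 5 and r.flow_lpm > 3 * max(nightly_baseline_lpm, 0.1):
        return True
    return False

print(is_suspected_leak(Reading(flow_lpm=4.0, hour=2, user_home=True), 0.2))  # True
```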

AIMS: The purpose of this study was to understand the impact of the pandemic on people's attitudes toward facial skin rejuvenation.

METHODS: Twitter data related to facial rejuvenation were collected from January 1, 2020, to April 30, 2020. Sentiment analysis, frequency analysis, and word-cloud generation were performed to analyze the data. Statistical analysis included two-tailed t tests and chi-square tests.
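The frequency-analysis step described in these methods can be sketched with standard-library Python; the tweets and stopword list below are illustrative only.

```python
# Minimal word-frequency sketch; the tweet list and stopword set are
# illustrative, not the study's data.
from collections import Counter
import re

tweets = [
    "Thinking about Botox and a facelift after lockdown",
    "Hyaluronic acid keeps skin hydrated",
]
stopwords = {"about", "and", "a", "after", "the"}

tokens = (w for t in tweets for w in re.findall(r"[a-z]+", t.lower()))
freq = Counter(w for w in tokens if w not in stopwords)
print(freq.most_common(5))  # e.g. the input a word cloud would be drawn from
```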

RESULTS: After the pandemic declaration, the number of tweets about facial rejuvenation increased significantly, while the search volume in Google Trends decreased. Negative public emotions increased, but positive emotions still dominated. The frequency of the words "discounts" and "purchase" decreased. The dominant words in the word cloud were "Botox," "facelift," "hyaluronic," and "skin."

CONCLUSION: The public has a positive attitude toward facial rejuvenation during the pandemic. In particular, minimally invasive procedures such as "Botox," "Hyaluronic acid," and "PRP" dominate the mainstream. Practitioners can track changes in public interest in facial rejuvenation over time and decide what to focus on.}, } @article {pmid32839626, year = {2020}, author = {Mahmood, T and Mubarik, MS}, title = {Balancing innovation and exploitation in the fourth industrial revolution: Role of intellectual capital and technology absorptive capacity.}, journal = {Technological forecasting and social change}, volume = {160}, number = {}, pages = {120248}, pmid = {32839626}, issn = {0040-1625}, abstract = {Industry 4.0, which features the Internet of things (IoT), cloud computing, big-data, digitalization, and cyber-physical systems, is transforming the way businesses are being run. It is making business processes more autonomous, automated and intelligent, and is transmuting the organizational structures of businesses by digitalizing their end-to-end business processes. In this context, balancing innovation and exploitation-organization's ambidexterity-while stepping into the fourth industrial revolution can be critical for organizational capability. This study examines the role of intellectual capital (IC)-human capital, structural capital and relational capital-in balancing the innovation and exploitation activities. It also examines the role of technology's absorptive capacity in the relationship between IC and organizational ambidexterity (OA). Data were collected from 217 small and medium enterprises from the manufacturing sector of Pakistan using a closed-ended Likert scale-based questionnaire. The study employs partial least square-Structural Equation Modeling (PLS-SEM) for data analysis. Findings indicate a profound influence of all dimensions of IC, both overall and by dimension, on organizations' ambidexterity. Findings also exhibit a significant partial mediating role of technology absorptive capacity (TAC) in the association of IC and ambidexterity. The findings of the study emphasize the creation of specific policies aimed to develop the IC of a firm, which in turn can enable a firm to maintain a balance between innovation and market exploitation activities. The study integrates TAC with the IC-OA relationship, which is the novelty of the study.}, } @article {pmid32837593, year = {2021}, author = {Hsu, IC and Chang, CC}, title = {Integrating machine learning and open data into social Chatbot for filtering information rumor.}, journal = {Journal of ambient intelligence and humanized computing}, volume = {12}, number = {1}, pages = {1023-1037}, pmid = {32837593}, issn = {1868-5137}, abstract = {Social networks have become a major platform for people to disseminate information, which can include negative rumors. In recent years, rumors on social networks have caused grave problems and considerable damage. We attempted to create a method to verify information from numerous social media messages. We propose a general architecture that integrates machine learning and open data with a Chatbot and is based on cloud computing (MLODCCC), which can assist users in evaluating information authenticity on social platforms. The proposed MLODCCC architecture consists of six integrated modules: cloud computing, machine learning, data preparation, open data, chatbot, and intelligent social application modules. Food safety has garnered worldwide attention. 
Consequently, we used the proposed MLODCCC architecture to develop a Food Safety Information Platform (FSIP) that provides a friendly hyperlink and chatbot interface on Facebook to identify credible food safety information. The performance and accuracy of three binary classification algorithms, namely the decision tree, logistic regression, and support vector machine algorithms, operating in different cloud computing environments were compared. The binary classification accuracy was 0.769, indicating that the developed FSIP classifies food safety information with reasonable accuracy.}, } @article {pmid32837253, year = {2020}, author = {Ghinita, G and Nguyen, K and Maruseac, M and Shahabi, C}, title = {A secure location-based alert system with tunable privacy-performance trade-off.}, journal = {GeoInformatica}, volume = {24}, number = {4}, pages = {951-985}, pmid = {32837253}, issn = {1384-6175}, abstract = {Monitoring location updates from mobile users has important applications in many areas, ranging from public health (e.g., COVID-19 contact tracing) and national security to social networks and advertising. However, sensitive information can be derived from movement patterns, thus protecting the privacy of mobile users is a major concern. Users may only be willing to disclose their locations when some condition is met, for instance in proximity of a disaster area or an event of interest. Currently, such functionality can be achieved using searchable encryption. Such cryptographic primitives provide provable guarantees for privacy, and allow decryption only when the location satisfies some predicate. Nevertheless, they rely on expensive pairing-based cryptography (PBC), of which direct application to the domain of location updates leads to impractical solutions. We propose secure and efficient techniques for private processing of location updates that complement the use of PBC and lead to significant gains in performance by reducing the amount of required pairing operations. We implement two optimizations that further improve performance: materialization of results to expensive mathematical operations, and parallelization. We also propose a heuristic that reduces the computational overhead by enlarging the alert zone by a small factor (given as a system parameter), thereby trading a small and controlled amount of privacy for significant performance gains. Extensive experimental results show that the proposed techniques significantly improve performance compared to the baseline, and reduce the searchable encryption overhead to a level that is practical in a computing environment with reasonable resources, such as the cloud.}, } @article {pmid32837247, year = {2022}, author = {Ibrahim, AU and Al-Turjman, F and Sa'id, Z and Ozsoz, M}, title = {Futuristic CRISPR-based biosensing in the cloud and internet of things era: an overview.}, journal = {Multimedia tools and applications}, volume = {81}, number = {24}, pages = {35143-35171}, pmid = {32837247}, issn = {1380-7501}, abstract = {Biosensor-based devices are transforming the medical diagnosis of diseases and the monitoring of patient signals. The development of smart and automated molecular diagnostic tools equipped with biomedical big data analysis, cloud computing and medical artificial intelligence can be an ideal approach for the detection and monitoring of diseases, precise therapy, and storage of data over the cloud for supportive decisions. 
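For the FSIP entry above, the comparison of the three named binary classifiers can be sketched with scikit-learn; the toy corpus, labels, and TF-IDF features below are assumptions, not the paper's data or feature pipeline.

```python
# Sketch comparing decision tree, logistic regression, and SVM on a toy
# rumor/credible corpus; data and TF-IDF features are illustrative.
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_val_score
from sklearn.pipeline import make_pipeline
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier

texts = ["eating raw sprouts causes cancer", "agency recalls contaminated lettuce",
         "microwaving food destroys all nutrients", "new labeling rules take effect"]
labels = [0, 1, 0, 1]  # 0 = rumor, 1 = credible (toy labels)

for clf in (DecisionTreeClassifier(), LogisticRegression(), SVC()):
    pipe = make_pipeline(TfidfVectorizer(), clf)
    scores = cross_val_score(pipe, texts, labels, cv=2)
    print(type(clf).__name__, scores.mean())
```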
This review focuses on the use of machine learning approaches for the development of futuristic CRISPR-biosensors based on microchips and the use of the Internet of Things for wireless transmission of signals over the cloud to support decision making. The present review also discusses the discovery of CRISPR, its usage as a gene-editing tool, and CRISPR-based biosensors with high sensitivities of attomolar (10[-18] M), femtomolar (10[-15] M) and picomolar (10[-12] M), in comparison to conventional biosensors with sensitivities of nanomolar (10[-9] M) and micromolar (10[-3] M). Additionally, the review outlines limitations and open research issues in the current state of CRISPR-based biosensing applications.}, } @article {pmid32837246, year = {2021}, author = {Al-Zinati, M and Alrashdan, R and Al-Duwairi, B and Aloqaily, M}, title = {A re-organizing biosurveillance framework based on fog and mobile edge computing.}, journal = {Multimedia tools and applications}, volume = {80}, number = {11}, pages = {16805-16825}, pmid = {32837246}, issn = {1380-7501}, abstract = {Biological threats are becoming a serious security issue for many countries across the world. Effective biosurveillance systems can primarily support appropriate responses to biological threats and consequently save human lives. Nevertheless, biosurveillance systems are costly to implement and hard to operate. Furthermore, they rely on static infrastructures that might not cope with the evolving dynamics of the monitored environment. In this paper, we present a reorganizing biosurveillance framework for the detection and localization of biological threats with fog and mobile edge computing support. In the proposed framework, a hierarchy of fog nodes is responsible for aggregating monitoring data within their regions and detecting potential threats. Although fog nodes are deployed on a fixed base station infrastructure, the framework provides an innovative technique for reorganizing the monitored environment structure to adapt to the evolving environmental conditions and to overcome the limitations of the static base station infrastructure. Evaluation results illustrate the ability of the framework to localize biological threats and detect infected areas. Moreover, the results show the effectiveness of the reorganization mechanisms in adjusting the environment structure to cope with the highly dynamic environment.}, } @article {pmid32835313, year = {2020}, author = {Blair, GS}, title = {A Tale of Two Cities: Reflections on Digital Technology and the Natural Environment.}, journal = {Patterns (New York, N.Y.)}, volume = {1}, number = {5}, pages = {100068}, pmid = {32835313}, issn = {2666-3899}, abstract = {Contemporary digital technologies can make a profound impact on our understanding of the natural environment in moving toward sustainable futures. Examples of such technologies include sources of new data (e.g., an environmental Internet of Things), the ability to store and process the large datasets that will result from this (e.g., through cloud computing), and the potential of data science and AI to make sense of these data alongside human experts. 
However, these same trends pose a threat to sustainable futures through, for example, the carbon footprint of digital technology and the risk that this footprint escalates through the very trends mentioned above.}, } @article {pmid32825602, year = {2020}, author = {Li, H and Lan, C and Fu, X and Wang, C and Li, F and Guo, H}, title = {A Secure and Lightweight Fine-Grained Data Sharing Scheme for Mobile Cloud Computing.}, journal = {Sensors (Basel, Switzerland)}, volume = {20}, number = {17}, pages = {}, pmid = {32825602}, issn = {1424-8220}, support = {No. 61602080, No. 61602084//National Natural Science Foundation of China/ ; No. GCIS201718//Guangxi Key Laboratory of Cryptography and Information Security/ ; No. LY19F020045//Natural Science Foundation of Zhejiang Province/ ; No. 2017B030314131-05//the Opening Project of Guangdong Provincial Key Laboratory of Information Security Technology/ ; No. Y201636547//the Department of Education of Zhejiang Province of China/ ; No. 2017C01062//the Key Research Project of Zhejiang Province/ ; }, abstract = {With the explosion of various mobile devices and the tremendous advancement in cloud computing technology, mobile devices have been seamlessly integrated with powerful cloud computing in an innovative paradigm named Mobile Cloud Computing (MCC) to facilitate mobile users in storing, computing and sharing their data with others. Meanwhile, Attribute Based Encryption (ABE) has been envisioned as one of the most promising cryptographic primitives for providing secure and flexible fine-grained "one to many" access control, particularly in large-scale distributed systems with unknown participators. However, most existing ABE schemes are not suitable for MCC because they involve expensive pairing operations which pose a formidable challenge for resource-constrained mobile devices, thus greatly delaying the widespread adoption of MCC. To this end, in this paper, we propose a secure and lightweight fine-grained data sharing scheme (SLFG-DSS) for a mobile cloud computing scenario to outsource the majority of time-consuming operations from the resource-constrained mobile devices to the resource-rich cloud servers. Different from the current schemes, our novel scheme can enjoy the following promising merits simultaneously: (1) Supporting verifiable outsourced decryption, i.e., the mobile user can ensure the validity of the transformed ciphertext returned from the cloud server; (2) resisting decryption key exposure, i.e., our proposed scheme can outsource decryption for intensive computing tasks during the decryption phase without revealing the user's data or decryption key; (3) achieving a CCA security level; thus, our novel scheme can be applied to scenarios with higher security requirements. The concrete security proof and performance analysis illustrate that our novel scheme is proven secure and suitable for the mobile cloud computing environment.}, } @article {pmid32824989, year = {2020}, author = {Sarker, VK and Gia, TN and Ben Dhaou, I and Westerlund, T}, title = {Smart Parking System with Dynamic Pricing, Edge-Cloud Computing and LoRa.}, journal = {Sensors (Basel, Switzerland)}, volume = {20}, number = {17}, pages = {}, pmid = {32824989}, issn = {1424-8220}, support = {328755//Academy of Finland/ ; }, abstract = {A rapidly growing number of vehicles in recent years causes long traffic jams and difficulty in the management of traffic in cities. 
One of the most significant reasons for increased traffic jams on the road is random parking in unauthorized and non-permitted places. In addition, management of available parking places cannot achieve the expected reduction in congestion-related problems due to mismanagement, a lack of real-time parking guidance for drivers, and general ignorance. As the number of roads, highways and related resources has not increased significantly, a rising need for a smart, dynamic and effective parking solution is observed. Accordingly, with the use of multiple sensors, an appropriate communication network and the advanced processing capabilities of edge and cloud computing, a smart parking system can help manage parking effectively and make parking easier for vehicle owners. In this paper, we propose a multi-layer architecture for a smart parking system consisting of multi-parametric parking slot sensor nodes, the latest long-range low-power wireless communication technology and Edge-Cloud computation. The proposed system enables dynamic management of parking for large areas while providing useful information to drivers about available parking locations and related services through near real-time monitoring of vehicles. Furthermore, we propose a dynamic pricing algorithm to yield maximum possible revenue for the parking authority and optimum parking slot availability for the drivers.}, } @article {pmid32824508, year = {2020}, author = {Nguyen, TT and Yeom, YJ and Kim, T and Park, DH and Kim, S}, title = {Horizontal Pod Autoscaling in Kubernetes for Elastic Container Orchestration.}, journal = {Sensors (Basel, Switzerland)}, volume = {20}, number = {16}, pages = {}, pmid = {32824508}, issn = {1424-8220}, support = {NRF-2019R1F1A1059408//National Research Foundation of Korea/ ; 2018-0-00387//Institute of Information & Communications Technology Planning & Evaluation/ ; }, abstract = {Kubernetes, an open-source container orchestration platform, enables high availability and scalability through diverse autoscaling mechanisms such as Horizontal Pod Autoscaler (HPA), Vertical Pod Autoscaler and Cluster Autoscaler. Amongst them, HPA helps provide seamless service by dynamically scaling up and down the number of resource units, called pods, without having to restart the whole system. Kubernetes monitors default Resource Metrics including CPU and memory usage of host machines and their pods. On the other hand, Custom Metrics, provided by external software such as Prometheus, are customizable to monitor a wide collection of metrics. In this paper, we investigate HPA through diverse experiments to provide critical knowledge on its operational behaviors. We also discuss the essential difference between Kubernetes Resource Metrics (KRM) and Prometheus Custom Metrics (PCM) and how they affect HPA's performance. 
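As background for the HPA entry above, the scaling rule documented for the Kubernetes Horizontal Pod Autoscaler is desiredReplicas = ceil(currentReplicas * currentMetric / targetMetric), with a tolerance band suppressing small corrections; a minimal sketch:

```python
# The published HPA scaling rule (from the Kubernetes documentation):
#   desiredReplicas = ceil(currentReplicas * currentMetric / targetMetric)
# A tolerance band (default around 0.1) suppresses scaling on small deviations.
import math

def desired_replicas(current_replicas: int, current_metric: float,
                     target_metric: float, tolerance: float = 0.1) -> int:
    ratio = current_metric / target_metric
    if abs(ratio - 1.0) <= tolerance:
        return current_replicas          # within tolerance: no scaling
    return math.ceil(current_replicas * ratio)

print(desired_replicas(3, current_metric=180.0, target_metric=100.0))  # -> 6
```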
Lastly, we provide deeper insights and lessons on optimizing HPA performance for researchers, developers, and system administrators working with Kubernetes.}, } @article {pmid32824288, year = {2020}, author = {Yang, H and Kim, Y}, title = {Design and Implementation of Fast Fault Detection in Cloud Infrastructure for Containerized IoT Services.}, journal = {Sensors (Basel, Switzerland)}, volume = {20}, number = {16}, pages = {}, pmid = {32824288}, issn = {1424-8220}, support = {2020-0-00946//Institute for Information and Communications Technology Promotion/ ; IITP-2020-2017-0-01633//Institute for Information and Communications Technology Promotion/ ; }, abstract = {The container-based cloud is used in various service infrastructures as it is lighter and more portable than a virtual machine (VM)-based infrastructure and is configurable in both bare-metal and VM environments. The Internet-of-Things (IoT) cloud-computing infrastructure is also evolving from a VM-based to a container-based infrastructure. In IoT clouds, the service availability of the cloud infrastructure is more important for mission-critical IoT services, such as real-time health monitoring, vehicle-to-vehicle (V2V) communication, and industrial IoT, than for general computing services. However, in the container environment that runs on a VM, the current fault detection method only considers the container's infrastructure, thus limiting the level of availability necessary for the performance of mission-critical IoT cloud services. Therefore, in a container environment running on a VM, fault detection and recovery methods that consider both the VM and container levels are necessary. In this study, we analyzed the fault-detection architecture in a container environment and designed and implemented a Fast Fault Detection Manager (FFDM) architecture using OpenStack and Kubernetes to realize fast fault detection. Through performance measurements, we verified that the FFDM can improve the fault detection time by more than three times over the existing method.}, } @article {pmid32822005, year = {2021}, author = {Zhao, L and Batta, I and Matloff, W and O'Driscoll, C and Hobel, S and Toga, AW}, title = {Neuroimaging PheWAS (Phenome-Wide Association Study): A Free Cloud-Computing Platform for Big-Data, Brain-Wide Imaging Association Studies.}, journal = {Neuroinformatics}, volume = {19}, number = {2}, pages = {285-303}, pmid = {32822005}, issn = {1559-0089}, support = {P41 EB015922/EB/NIBIB NIH HHS/United States ; U54 EB020406/EB/NIBIB NIH HHS/United States ; //CIHR/Canada ; R01 MH094343/MH/NIMH NIH HHS/United States ; U01 AG024904/AG/NIA NIH HHS/United States ; P01 AG012435/AG/NIA NIH HHS/United States ; P30 AG066530/AG/NIA NIH HHS/United States ; }, mesh = {Alzheimer Disease/diagnostic imaging/genetics ; *Big Data ; Brain/*diagnostic imaging ; Case-Control Studies ; *Cloud Computing ; Genome-Wide Association Study/*methods ; Genomics/methods ; Humans ; Imaging, Three-Dimensional/methods ; Neuroimaging/*methods ; *Phenotype ; Polymorphism, Single Nucleotide/genetics ; }, abstract = {Large-scale, case-control genome-wide association studies (GWASs) have revealed genetic variations associated with diverse neurological and psychiatric disorders. 
Recent advances in neuroimaging and genomic databases of large healthy and diseased cohorts have empowered studies to characterize effects of the discovered genetic factors on brain structure and function, implicating neural pathways and genetic mechanisms in the underlying biology. However, the unprecedented scale and complexity of the imaging and genomic data require new advanced biomedical data science tools to manage, process and analyze the data. In this work, we introduce Neuroimaging PheWAS (phenome-wide association study): a web-based system for searching over a wide variety of brain-wide imaging phenotypes to discover true system-level gene-brain relationships using a unified genotype-to-phenotype strategy. This design features a user-friendly graphical user interface (GUI) for anonymous data uploading, study definition and management, and interactive result visualizations, as well as a cloud-based computational infrastructure and multiple state-of-the-art methods for statistical association analysis and multiple comparison correction. We demonstrated the potential of Neuroimaging PheWAS with a case study analyzing the influences of the apolipoprotein E (APOE) gene on various brain morphological properties across the brain in the Alzheimer's Disease Neuroimaging Initiative (ADNI) cohort. Benchmark tests were performed to evaluate the system's performance using data from UK Biobank. The Neuroimaging PheWAS system is freely available. It simplifies the execution of PheWAS on neuroimaging data and provides an opportunity for imaging genetics studies to elucidate the routes by which specific genetic variants act on diseases in the context of detailed imaging phenotypic data.}, } @article {pmid32800692, year = {2020}, author = {McRoy, C and Patel, L and Gaddam, DS and Rothenberg, S and Herring, A and Hamm, J and Chelala, L and Weinstein, J and Smith, E and Awan, O}, title = {Radiology Education in the Time of COVID-19: A Novel Distance Learning Workstation Experience for Residents.}, journal = {Academic radiology}, volume = {27}, number = {10}, pages = {1467-1474}, pmid = {32800692}, issn = {1878-4046}, mesh = {*Betacoronavirus ; COVID-19 ; *Coronavirus Infections ; *Education, Distance ; *Internship and Residency ; *Pandemics ; *Pneumonia, Viral ; SARS-CoV-2 ; }, abstract = {RATIONALE AND OBJECTIVES: The coronavirus disease of 2019 (COVID-19) pandemic has challenged the educational missions of academic radiology departments nationwide. We describe a novel cloud-based, HIPAA-compliant and accessible education platform which simulates a live radiology workstation for continued education of first-year radiology (R1) residents, with an emphasis on call preparation and peer-to-peer resident learning.
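For the Neuroimaging PheWAS entry above, the multiple-comparison step (one test per imaging phenotype) can be sketched with statsmodels; the p-values below are toy numbers, and FDR (Benjamini-Hochberg) is just one of the correction methods such a system might offer.

```python
# Sketch of multiple-comparison correction across imaging phenotypes;
# the p-values are toy numbers, and 'fdr_bh' is one method among several.
import numpy as np
from statsmodels.stats.multitest import multipletests

pvals = np.array([1e-6, 0.003, 0.04, 0.2, 0.7])   # one per brain phenotype
reject, p_adj, _, _ = multipletests(pvals, alpha=0.05, method='fdr_bh')
print(list(zip(p_adj.round(4), reject)))
```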

MATERIALS AND METHODS: Three tools were used in our education model: Pacsbin (Orion Medical Technologies, Baltimore, MD, pacsbin.com), Zoom (Zoom Video Communications, San Jose, CA, zoom.us), and Google Classroom (Google, Mountain View, CA, classroom.google.com). A workflow driven by senior radiology residents (R2-R4; n = 7) was established to provide scrollable Digital Imaging and Communications in Medicine (DICOM)-based case collections to the R1 residents (n = 9) via Pacsbin. A centralized classroom was created using Google Classroom for assignments, reports, and discussion, where attending radiologists could review content for accuracy. Daily case collections over an 8-week period from March to May were reviewed via Zoom video conference readout in small groups consisting of an R2-R4 teacher and R1 residents. Surveys were administered to R1 residents, R2-R4 residents, and attending radiologist participants.

RESULTS: One hundred percent of R1 residents felt this model improved their confidence and knowledge to take independent call. Seventy-eight percent of the R1 residents (n = 7/9) demonstrated strong interest in continuing the project after pandemic-related restrictions are lifted. Based on a Likert "helpfulness" scale of 1-5, with 5 being most helpful, the project earned an overall average rating of 4.9. Two R2-R4 teachers demonstrated increased interest in pursuing academic radiology.

CONCLUSION: In response to unique pandemic circumstances, our institution implemented a novel cloud-based distance learning solution to simulate the radiology workstation. This platform helped continue the program's educational mission, offered first-year residents increased call preparation, and promoted peer-to-peer learning. This approach to case-based learning could be used at other institutions to educate residents.}, } @article {pmid32781671, year = {2020}, author = {D'Amico, G and L'Abbate, P and Liao, W and Yigitcanlar, T and Ioppolo, G}, title = {Understanding Sensor Cities: Insights from Technology Giant Company Driven Smart Urbanism Practices.}, journal = {Sensors (Basel, Switzerland)}, volume = {20}, number = {16}, pages = {}, pmid = {32781671}, issn = {1424-8220}, abstract = {The data-driven approach to sustainable urban development is becoming increasingly popular among cities across the world. This is due to cities' attention to supporting smart and sustainable urbanism practices. In an era of digitalization of urban services and processes, which is upon us, platform urbanism is becoming a fundamental tool to support smart urban governance, and helping in the formation of a new version of cities-i.e., City 4.0. This new version utilizes urban dashboards and platforms in its operations and management tasks of its complex urban metabolism. These intelligent systems help in maintaining the robustness of our cities, integrating various sensors (e.g., internet-of-things) and big data analysis technologies (e.g., artificial intelligence) with the aim of optimizing urban infrastructures and services (e.g., water, waste, energy), and turning the urban system into a smart one. The study generates insights from sensor city best practices by placing some renowned projects, implemented by Huawei, Cisco, Google, Ericsson, Microsoft, and Alibaba, under the microscope. The investigation findings reveal that the sensor city approach: (a) Has the potential to increase the smartness and sustainability level of cities; (b) Manages to engage citizens and companies in the process of planning, monitoring and analyzing urban processes; (c) Raises awareness on the local environmental, social and economic issues, and; (d) Provides a novel city blueprint for urban administrators, managers and planners. 
Nonetheless, the use of advanced technologies-e.g., real-time monitoring stations, cloud computing, surveillance cameras-poses a multitude of challenges related to: (a) Quality of the data used; (b) Level of protection of traditional and cybernetic urban security; (c) Necessary integration between the various urban infrastructures; and (d) Ability to transform feedback from stakeholders into innovative urban policies.}, } @article {pmid32777825, year = {2020}, author = {Giménez-Alventosa, V and Segrelles, JD and Moltó, G and Roca-Sogorb, M}, title = {APRICOT: Advanced Platform for Reproducible Infrastructures in the Cloud via Open Tools.}, journal = {Methods of information in medicine}, volume = {59}, number = {S 02}, pages = {e33-e45}, pmid = {32777825}, issn = {2511-705X}, mesh = {Biological Science Disciplines ; *Cloud Computing ; Computational Biology ; Databases, Factual ; Magnetic Resonance Imaging ; Positron-Emission Tomography ; *Reproducibility of Results ; Research ; *Software ; }, abstract = {BACKGROUND: Scientific publications are meant to exchange knowledge among researchers, but the inability to properly reproduce computational experiments limits the quality of scientific research. Furthermore, the literature shows that more than 50% of preclinical research is irreproducible, which produces a huge waste of resources on unprofitable research in the life sciences field. As a consequence, scientific reproducibility is being fostered to promote Open Science through open databases and software tools that are typically deployed on existing computational resources. However, some computational experiments require complex virtual infrastructures, such as elastic clusters of PCs, that can be dynamically provided from multiple clouds. Obtaining these infrastructures requires not only an infrastructure provider, but also advanced knowledge in the cloud computing field.

OBJECTIVES: The main aim of this paper is to improve reproducibility in the life sciences to produce better and more cost-effective research. For that purpose, our intention is to simplify infrastructure usage and deployment for researchers.

METHODS: This paper introduces the Advanced Platform for Reproducible Infrastructures in the Cloud via Open Tools (APRICOT), an open source extension for Jupyter to deploy deterministic virtual infrastructures across multiclouds for reproducible scientific computational experiments. To exemplify its utilization and how APRICOT can improve the reproduction of experiments with complex computation requirements, two examples in the field of life sciences are provided. All requirements to reproduce both experiments are disclosed within APRICOT and, therefore, the experiments can be reproduced by users.

RESULTS: To show the capabilities of APRICOT, we have processed a real magnetic resonance image to accurately characterize a prostate cancer using a Message Passing Interface cluster deployed automatically with APRICOT. In addition, the second example shows how APRICOT scales the deployed infrastructure, according to the workload, using a batch cluster. This example consists of a multiparametric study of a positron emission tomography image reconstruction.

CONCLUSION: APRICOT's benefits are the integrated deployment, management, and usage of specific infrastructures for Open Science, making experiments that involve such computational infrastructures reproducible. All the experiment steps and details can be documented in the same Jupyter notebook, which includes infrastructure specifications, data storage, experimentation execution, results gathering, and infrastructure termination. Thus, distributing the experimentation notebook and the needed data should be enough to reproduce the experiment.}, } @article {pmid32765566, year = {2020}, author = {Apolo-Apolo, OE and Pérez-Ruiz, M and Martínez-Guanter, J and Valente, J}, title = {A Cloud-Based Environment for Generating Yield Estimation Maps From Apple Orchards Using UAV Imagery and a Deep Learning Technique.}, journal = {Frontiers in plant science}, volume = {11}, number = {}, pages = {1086}, pmid = {32765566}, issn = {1664-462X}, abstract = {Farmers require accurate yield estimates, since they are key to predicting the volume of stock needed at supermarkets and to organizing harvesting operations. In many cases, the yield is visually estimated by the crop producer, but this approach is not accurate or time efficient. This study presents a rapid sensing and yield estimation scheme using off-the-shelf aerial imagery and deep learning. A Region-Convolutional Neural Network was trained to detect and count the number of apple fruit on individual trees located on the orthomosaic built from images taken by the unmanned aerial vehicle (UAV). The results obtained with the proposed approach were compared with apple counts made in situ by an agrotechnician, and an R[2] value of 0.86 was acquired (MAE: 10.35 and RMSE: 13.56). As only part of each tree's fruit was visible in the top-view images, linear regression was used to estimate the total number of apples on each tree. An R[2] value of 0.80 (MAE: 128.56 and RMSE: 130.56) was obtained. Using the number of fruits detected and the tree coordinates, two shapefiles were generated with a Python script in Google Colab. From these, two yield maps were displayed: one with information per tree and another with information per tree row. We are confident that these results will help to maximize the crop producers' outputs via optimized orchard management.}, } @article {pmid32753501, year = {2020}, author = {Petit, RA and Read, TD}, title = {Bactopia: a Flexible Pipeline for Complete Analysis of Bacterial Genomes.}, journal = {mSystems}, volume = {5}, number = {4}, pages = {}, pmid = {32753501}, issn = {2379-5077}, support = {U54 CK000485/CK/NCEZID CDC HHS/United States ; U54CK000485/ACL/ACL HHS/United States ; }, abstract = {Sequencing of bacterial genomes using Illumina technology has become such a standard procedure that often data are generated faster than can be conveniently analyzed. We created a new series of pipelines called Bactopia, built using Nextflow workflow software, to provide efficient comparative genomic analyses for bacterial species or genera. Bactopia consists of a data set setup step (Bactopia Data Sets [BaDs]), which creates a series of customizable data sets for the species of interest, the Bactopia Analysis Pipeline (BaAP), which performs quality control, genome assembly, and several other functions based on the available data sets and outputs the processed data to a structured directory format, and a series of Bactopia Tools (BaTs) that perform specific postprocessing on some or all of the processed data. 
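For the apple-orchard entry above, the visible-count to total-count linear regression can be sketched with NumPy; the sample counts below are made up for illustration.

```python
# Sketch of the visible-count -> total-count regression described in the
# apple-orchard entry; the sample counts are invented for illustration.
import numpy as np

visible = np.array([52, 80, 35, 96, 60])    # fruits detected in top-view image
total = np.array([130, 210, 90, 260, 155])  # ground-truth counts per tree

slope, intercept = np.polyfit(visible, total, deg=1)
predict_total = lambda v: slope * v + intercept
print(round(predict_total(70)))  # estimated total apples for a new tree
```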
BaTs include pan-genome analysis, computing average nucleotide identity between samples, extracting and profiling the 16S genes, and taxonomic classification using highly conserved genes. It is expected that the number of BaTs will increase to fill specific applications in the future. As a demonstration, we performed an analysis of 1,664 public Lactobacillus genomes, focusing on Lactobacillus crispatus, a species that is a common part of the human vaginal microbiome. Bactopia is an open source system that can scale from projects as small as one bacterial genome to ones including thousands of genomes and that allows for great flexibility in choosing comparison data sets and options for downstream analysis. Bactopia code can be accessed at https://www.github.com/bactopia/bactopia. IMPORTANCE: It is now relatively easy to obtain a high-quality draft genome sequence of a bacterium, but bioinformatic analysis requires organization and optimization of multiple open source software tools. We present Bactopia, a pipeline for bacterial genome analysis, as an option for processing bacterial genome data. Bactopia also automates downloading of data from multiple public sources and species-specific customization. Because the pipeline is written in the Nextflow language, analyses can be scaled from individual genomes on a local computer to thousands of genomes using cloud resources. As a usage example, we processed 1,664 Lactobacillus genomes from public sources and used comparative analysis workflows (Bactopia Tools) to identify and analyze members of the L. crispatus species.}, } @article {pmid32751366, year = {2020}, author = {Navarro, E and Costa, N and Pereira, A}, title = {A Systematic Review of IoT Solutions for Smart Farming.}, journal = {Sensors (Basel, Switzerland)}, volume = {20}, number = {15}, pages = {}, pmid = {32751366}, issn = {1424-8220}, mesh = {Agriculture/instrumentation/*methods ; Electronic Data Processing ; *Internet of Things ; }, abstract = {The world population growth is increasing the demand for food production. Furthermore, the reduction of the workforce in rural areas and the increase in production costs are challenges for food production nowadays. Smart farming is a farm management concept that may use Internet of Things (IoT) to overcome the current challenges of food production. This work uses the preferred reporting items for systematic reviews (PRISMA) methodology to systematically review the existing literature on smart farming with IoT. The review aims to identify the main devices, platforms, network protocols, processing data technologies and the applicability of smart farming with IoT to agriculture. The review shows an evolution in the way data is processed in recent years. Traditional approaches mostly used data in a reactive manner.
In more recent approaches, however, new technological developments allowed the use of data to prevent crop problems and to improve the accuracy of crop diagnosis.}, } @article {pmid32750932, year = {2020}, author = {Ranchal, R and Bastide, P and Wang, X and Gkoulalas-Divanis, A and Mehra, M and Bakthavachalam, S and Lei, H and Mohindra, A}, title = {Disrupting Healthcare Silos: Addressing Data Volume, Velocity and Variety With a Cloud-Native Healthcare Data Ingestion Service.}, journal = {IEEE journal of biomedical and health informatics}, volume = {24}, number = {11}, pages = {3182-3188}, doi = {10.1109/JBHI.2020.3001518}, pmid = {32750932}, issn = {2168-2208}, mesh = {Big Data ; *Cloud Computing ; *Computer Security ; Delivery of Health Care ; Eating ; Humans ; }, abstract = {Healthcare enterprises are starting to adopt cloud computing due to its numerous advantages over traditional infrastructures. This has become a necessity because of the increased volume, velocity and variety of healthcare data, and the need to facilitate data correlation and large-scale analysis. Cloud computing infrastructures have the power to offer continuous acquisition of data from multiple heterogeneous sources, efficient data integration, and big data analysis. At the same time, security, availability, and disaster recovery are critical factors aiding towards the adoption of cloud computing. However, the migration of healthcare workloads to cloud is not straightforward due to the vagueness in healthcare data standards, heterogeneity and sensitive nature of healthcare data, and many regulations that govern its usage. This paper highlights the need for providing healthcare data acquisition using cloud infrastructures and presents the challenges, requirements, use-cases, and best practices for building a state-of-the-art healthcare data ingestion service on cloud.}, } @article {pmid32750051, year = {2020}, author = {Frake, AN and Peter, BG and Walker, ED and Messina, JP}, title = {Leveraging big data for public health: Mapping malaria vector suitability in Malawi with Google Earth Engine.}, journal = {PloS one}, volume = {15}, number = {8}, pages = {e0235697}, pmid = {32750051}, issn = {1932-6203}, support = {D43 TW009639/TW/FIC NIH HHS/United States ; U19 AI089683/AI/NIAID NIH HHS/United States ; }, mesh = {Animals ; Anopheles/physiology ; *Big Data ; Breeding ; Climate ; Humans ; Malaria/*epidemiology/transmission ; Malawi/epidemiology ; Mosquito Vectors/physiology ; *Public Health ; Search Engine ; Seasons ; }, abstract = {In an era of big data, the availability of satellite-derived global climate, terrain, and land cover imagery presents an opportunity for modeling the suitability of malaria disease vectors at fine spatial resolutions, across temporal scales, and over vast geographic extents. Leveraging cloud-based geospatial analytical tools, we present an environmental suitability model that considers water resources, flow accumulation areas, precipitation, temperature, vegetation, and land cover. In contrast to predictive models generated using spatially and temporally discontinuous mosquito presence information, this model provides continuous fine-spatial resolution information on the biophysical drivers of suitability. For the purposes of this study the model is parameterized for Anopheles gambiae s.s. 
in Malawi for the rainy (December-March) and dry seasons (April-November) in 2017; however, the model may be repurposed to accommodate different mosquito species, temporal periods, or geographical boundaries. Final products elucidate the drivers and potential habitat of Anopheles gambiae s.s. Rainy season results are presented by quartile of precipitation; Quartile four (Q4) identifies areas most likely to become inundated and shows 7.25% of Malawi exhibits suitable water conditions (water only) for Anopheles gambiae s.s., approximately 16% for water plus another factor, and 8.60% is maximally suitable, meeting suitability thresholds for water presence, terrain characteristics, and climatic conditions. Nearly 21% of Malawi is suitable for breeding based on land characteristics alone and 28.24% is suitable according to climate and land characteristics. Only 6.14% of the total land area is suboptimal. Dry season results show 25.07% of the total land area is suboptimal or unsuitable. Approximately 42% of Malawi is suitable based on land characteristics alone during the dry season, and 13.11% is suitable based on land plus another factor. Less than 2% meets the suitability criteria for climate, water, and land. Findings illustrate environmental drivers of suitability for malaria vectors, providing an opportunity for a more comprehensive approach to malaria control that includes not only modeled species distributions, but also the underlying drivers of suitability for a more effective approach to environmental management.}, } @article {pmid32749335, year = {2020}, author = {Kilper, DC and Peyghambarian, N}, title = {Changing evolution of optical communication systems at the network edges.}, journal = {Applied optics}, volume = {59}, number = {22}, pages = {G209-G218}, doi = {10.1364/AO.394119}, pmid = {32749335}, issn = {1539-4522}, abstract = {Metro and data center networks are growing rapidly, while global fixed Internet traffic growth shows evidence of slowing. An analysis of the distribution of network capacity versus distance reveals capacity gaps in networks important to wireless backhaul networks and cloud computing. These networks are built from layers of electronic aggregation switches. Photonic integration and software-defined networking control are identified as key enabling technologies for the use of optical switching in these applications. Advances in optical switching for data center and metro networks in the CIAN engineering research center are reviewed and examined as potential directions for optical communication system evolution.}, } @article {pmid32746308, year = {2020}, author = {Camara Gradim, LC and Archanjo Jose, M and Marinho Cezar da Cruz, D and de Deus Lopes, R}, title = {IoT Services and Applications in Rehabilitation: An Interdisciplinary and Meta-Analysis Review.}, journal = {IEEE transactions on neural systems and rehabilitation engineering : a publication of the IEEE Engineering in Medicine and Biology Society}, volume = {28}, number = {9}, pages = {2043-2052}, doi = {10.1109/TNSRE.2020.3005616}, pmid = {32746308}, issn = {1558-0210}, mesh = {Humans ; *Internet of Things ; *Wearable Electronic Devices ; }, abstract = {UNLABELLED: Internet of things (IoT) is a designation given to a technological system that can enhance possibilities of connectivity between people and things, and has shown itself to be an opportunity for developing and improving smart rehabilitation systems in the e-Health area.

OBJECTIVE: To identify works involving IoT that deal with the development, architecture, application, implementation, and use of technological equipment in the area of patient rehabilitation. Technology or Method: A systematic review based on Kitchenham's suggestions combined with the PRISMA protocol. The search strategy was carried out comprehensively in the IEEE Xplore Digital Library, Web of Science, and Scopus databases; the data extraction method for assessment and analysis considered only primary-study articles related to the IoT and the rehabilitation of patients.

RESULTS: We found 29 studies that addressed the research question, and all were classified based on scientific evidence.

CONCLUSIONS: This systematic review presents the current state of the art on the IoT in health rehabilitation and identifies findings from interdisciplinary research on different clinical cases with technological systems including wearable devices and cloud computing. The gaps in IoT for rehabilitation include the need for more clinical randomized controlled trials and longitudinal studies. Clinical Impact: This paper is interdisciplinary, spanning areas such as the Internet of Things and Information and Communication Technology and their application to the medical and rehabilitation domains.}, } @article {pmid32731501, year = {2020}, author = {Jo, JH and Jo, B and Kim, JH and Choi, I}, title = {Implementation of IoT-Based Air Quality Monitoring System for Investigating Particulate Matter (PM10) in Subway Tunnels.}, journal = {International journal of environmental research and public health}, volume = {17}, number = {15}, pages = {}, pmid = {32731501}, issn = {1660-4601}, mesh = {Air Pollutants/*analysis ; Air Pollution/analysis/statistics & numerical data ; *Environmental Monitoring ; Particulate Matter/*analysis ; *Railroads ; Republic of Korea ; }, abstract = {Air quality monitoring for subway tunnels in South Korea is a topic of great interest because more than 8 million passengers per day use the subway, which has a concentration of particulate matter (PM10) greater than that above ground. In this paper, an Internet of Things (IoT)-based air quality monitoring system, consisting of an air quality measurement device called Smart-Air, an IoT gateway, and a cloud computing web server, is presented to monitor the concentration of PM10 in subway tunnels. The goal of the system is to efficiently monitor air quality at any time and from anywhere by combining IoT and cloud computing technologies. This system was successfully implemented in Incheon's subway tunnels to investigate levels of PM10. The concentration of particulate matter was greatest between the morning and afternoon rush hours. In addition, the residence time of PM10 increased as the depth of the monitoring location increased. During the experimentation period, the South Korean government implemented an air quality management system. An analysis was performed to follow up after implementation and assess how the change improved conditions. Based on the experiments, the system was efficient and effective at monitoring particulate matter for improving air quality in subway tunnels.}, } @article {pmid32725321, year = {2020}, author = {Watts, P and Breedon, P and Nduka, C and Neville, C and Venables, V and Clarke, S}, title = {Cloud Computing Mobile Application for Remote Monitoring of Bell's Palsy.}, journal = {Journal of medical systems}, volume = {44}, number = {9}, pages = {149}, pmid = {32725321}, issn = {1573-689X}, support = {II-LA-0814-20008//National Institute for Health Research/ ; }, mesh = {*Bell Palsy ; Cloud Computing ; Computer Security ; Europe ; Humans ; *Mobile Applications ; }, abstract = {Mobile applications provide the healthcare industry with a means of connecting with patients in their own home utilizing their own personal mobile devices such as tablets and phones. This allows therapists to monitor the progress of people under their care from a remote location, all with the added benefit that patients are familiar with their own mobile devices, thereby reducing the time required to train patients on the new technology.
The health service also benefits, as no additional cost is incurred to purchase devices. The Facial Remote Activity Monitoring Eyewear (FRAME) mobile application and web service framework has been designed to work on the iOS and Android platforms, the two most commonly used today. Results: The system utilizes secure cloud-based data storage to collect, analyse, and store data; this allows near real-time, secure remote access by therapists to monitor their patients and intervene when required. The underlying framework has been designed to be secure, anonymous, and flexible to ensure compliance with data protection law and the General Data Protection Regulation (GDPR); this new standard came into effect in May 2018, replacing the Data Protection Act 1998 in the UK and the earlier data protection directive in Europe.}, } @article {pmid32719837, year = {2020}, author = {Krissaane, I and De Niz, C and Gutiérrez-Sacristán, A and Korodi, G and Ede, N and Kumar, R and Lyons, J and Manrai, A and Patel, C and Kohane, I and Avillach, P}, title = {Scalability and cost-effectiveness analysis of whole genome-wide association studies on Google Cloud Platform and Amazon Web Services.}, journal = {Journal of the American Medical Informatics Association : JAMIA}, volume = {27}, number = {9}, pages = {1425-1430}, pmid = {32719837}, issn = {1527-974X}, support = {K01 HL138259/HL/NHLBI NIH HHS/United States ; OT3 OD025466/OD/NIH HHS/United States ; OT3 HL142480/HL/NHLBI NIH HHS/United States ; }, mesh = {*Cloud Computing/economics ; Computer Communication Networks ; Cost-Benefit Analysis ; *Genome-Wide Association Study/economics/methods ; Genomics/methods ; Humans ; }, abstract = {OBJECTIVE: Advancements in human genomics have generated a surge of available data, fueling the growth and accessibility of databases for more comprehensive, in-depth genetic studies.

METHODS: We provide a straightforward and innovative methodology to optimize cloud configuration in order to conduct genome-wide association studies. We utilized Spark clusters on both Google Cloud Platform and Amazon Web Services, as well as Hail (http://doi.org/10.5281/zenodo.2646680) for the analysis and exploration of genomic variant datasets.
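
To make the Hail workflow concrete, a minimal sketch of a cloud-backed association run is shown below; the bucket paths, table keys, and phenotype field are hypothetical, while the calls themselves (read_matrix_table, annotate_cols, linear_regression_rows) are Hail's standard GWAS primitives:

import hail as hl

hl.init()  # on GCP/AWS this would attach to the Spark cluster instead of running locally

# Hypothetical paths: a variant MatrixTable and a phenotype table keyed by sample ID
mt = hl.read_matrix_table('gs://my-bucket/genomes.mt')
pheno = hl.import_table('gs://my-bucket/phenotypes.tsv', impute=True, key='sample_id')

# Join phenotypes onto the samples (columns) of the MatrixTable
mt = mt.annotate_cols(pheno=pheno[mt.s])

# Linear regression of the trait on alternate-allele dosage; 1.0 is the intercept term
gwas = hl.linear_regression_rows(
    y=mt.pheno.trait,
    x=mt.GT.n_alt_alleles(),
    covariates=[1.0],
)

# Genome-wide significant hits
gwas.filter(gwas.p_value < 5e-8).show()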

RESULTS: Comparative evaluation of numerous cloud-based cluster configurations demonstrates a successful and unprecedented compromise between speed and cost for performing genome-wide association studies on 4 distinct whole-genome sequencing datasets. Results are consistent across the 2 cloud providers and could be highly useful for accelerating research in genetics.

CONCLUSIONS: We present a timely piece for one of the most frequently asked questions when moving to the cloud: what is the trade-off between speed and cost?}, } @article {pmid32719530, year = {2020}, author = {Li, B and Gould, J and Yang, Y and Sarkizova, S and Tabaka, M and Ashenberg, O and Rosen, Y and Slyper, M and Kowalczyk, MS and Villani, AC and Tickle, T and Hacohen, N and Rozenblatt-Rosen, O and Regev, A}, title = {Cumulus provides cloud-based data analysis for large-scale single-cell and single-nucleus RNA-seq.}, journal = {Nature methods}, volume = {17}, number = {8}, pages = {793-798}, pmid = {32719530}, issn = {1548-7105}, support = {/HHMI/Howard Hughes Medical Institute/United States ; T32 HG002295/HG/NHGRI NIH HHS/United States ; RC2 DK116691/DK/NIDDK NIH HHS/United States ; RM1 HG006193/HG/NHGRI NIH HHS/United States ; T32 CA207021/CA/NCI NIH HHS/United States ; }, mesh = {Cloud Computing/*economics ; Computational Biology/economics/*methods ; High-Throughput Nucleotide Sequencing/economics/*methods ; Sequence Analysis, RNA/economics/*methods ; Single-Cell Analysis/*methods ; }, abstract = {Massively parallel single-cell and single-nucleus RNA sequencing has opened the way to systematic tissue atlases in health and disease, but as the scale of data generation is growing, so is the need for computational pipelines for scaled analysis. Here we developed Cumulus, a cloud-based framework for analyzing large-scale single-cell and single-nucleus RNA sequencing datasets. Cumulus combines the power of cloud computing with improvements in algorithm and implementation to achieve high scalability, low cost, user-friendliness and integrated support for a comprehensive set of features. We benchmark Cumulus on the Human Cell Atlas Census of Immune Cells dataset of bone marrow cells and show that it substantially improves efficiency over conventional frameworks, while maintaining or improving the quality of results, enabling large-scale studies.}, } @article {pmid32707801, year = {2020}, author = {Song, Y and Zhu, Y and Nan, T and Hou, J and Du, S and Song, S}, title = {Accelerating Faceting Wide-Field Imaging Algorithm with FPGA for SKA Radio Telescope as a Vast Sensor Array.}, journal = {Sensors (Basel, Switzerland)}, volume = {20}, number = {15}, pages = {}, pmid = {32707801}, issn = {1424-8220}, support = {U1831118//the National Natural Science Foundation of China/ ; }, abstract = {The SKA (Square Kilometer Array) radio telescope will become the most sensitive telescope by correlating a huge number of antenna nodes to form a vast array of sensors in a region over one hundred kilometers. Faceting, the wide-field imaging algorithm, is a novel approach to solving image construction from sensing data where the curvature of the Earth's surface cannot be ignored. However, traditional cloud computing processors, even the most sophisticated supercomputers, cannot meet the extremely high computational performance requirement. In this paper, we propose the design and implementation of high-efficiency FPGA (Field Programmable Gate Array)-based hardware acceleration of the key algorithm, faceting, in SKA, focusing on phase rotation and gridding, which are the most time-consuming phases of the faceting algorithm. Through the analysis of algorithm behavior and bottleneck, we design and optimize the memory architecture and computing logic of the FPGA-based accelerator.
Simulations and tests on the FPGA confirm the acceleration achieved by our design, showing that the phase-rotation performance is 20× that of previous work. We then designed and optimized an efficient microarchitecture of loop unrolling and pipelining for the gridding accelerator, and a system simulation confirmed its performance: the acceleration ratio for the gridding part is 5.48 compared with the software implementation. Hence, our approach enables efficient acceleration of the faceting algorithm on FPGAs with high performance to meet the computational constraints of SKA as a representative vast sensor array.}, } @article {pmid32706696, year = {2020}, author = {Saarikko, J and Niela-Vilen, H and Ekholm, E and Hamari, L and Azimi, I and Liljeberg, P and Rahmani, AM and Löyttyniemi, E and Axelin, A}, title = {Continuous 7-Month Internet of Things-Based Monitoring of Health Parameters of Pregnant and Postpartum Women: Prospective Observational Feasibility Study.}, journal = {JMIR formative research}, volume = {4}, number = {7}, pages = {e12417}, pmid = {32706696}, issn = {2561-326X}, abstract = {BACKGROUND: Monitoring during pregnancy is vital to ensure the mother's and infant's health. Remote continuous monitoring provides health care professionals with significant opportunities to observe health-related parameters in their patients and to detect any pathological signs at an early stage of pregnancy, and may thus partially replace traditional appointments.

OBJECTIVE: This study aimed to evaluate the feasibility of continuously monitoring the health parameters (physical activity, sleep, and heart rate) of nulliparous women throughout pregnancy and until 1 month postpartum, with a smart wristband and an Internet of Things (IoT)-based monitoring system.

METHODS: This prospective observational feasibility study used a convenience sample of 20 nulliparous women from the Hospital District of Southwest Finland. Continuous monitoring of physical activity/step counts, sleep, and heart rate was performed with a smart wristband for 24 hours a day, 7 days a week over 7 months (6 months during pregnancy and 1 month postpartum). The smart wristband was connected to a cloud server. The total number of possible monitoring days was 203 during pregnancy weeks 13 to 42 and 28 in the postpartum period.

RESULTS: Valid physical activity data were available for a median of 144 (range 13-188) days (75% of possible monitoring days), and valid sleep data were available for a median of 137 (range 0-184) days (72% of possible monitoring days) per participant during pregnancy. During the postpartum period, a median of 15 (range 0-25) days (54% of possible monitoring days) of valid physical activity data and 16 (range 0-27) days (57% of possible monitoring days) of valid sleep data were available. Physical activity decreased from the second trimester to the third trimester by a mean of 1793 (95% CI 1039-2548) steps per day (P<.001). The decrease continued by a mean of 1339 (95% CI 474-2205) steps to the postpartum period (P=.004). Sleep during pregnancy also decreased from the second trimester to the third trimester by a mean of 20 minutes (95% CI -0.7 to 42 minutes; P=.06) and sleep time shortened an additional 1 hour (95% CI 39 minutes to 1.5 hours) after delivery (P<.001). The mean resting heart rate increased toward the third trimester and returned to the early pregnancy level during the postpartum period.
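
The trimester-to-trimester changes above are paired mean differences with 95% confidence intervals; one simple way to compute such an estimate is sketched below on invented per-participant daily step counts (the published analysis may well have used a different statistical model):

import numpy as np
from scipy import stats

# Invented per-participant mean daily steps in the 2nd and 3rd trimesters
second = np.array([9100, 7800, 8400, 10200, 6900, 8800])
third = np.array([7200, 6500, 6600, 8100, 5400, 7300])

diff = second - third                       # paired differences
mean = diff.mean()
ci = stats.t.interval(0.95, df=len(diff) - 1, loc=mean, scale=stats.sem(diff))
t_stat, p = stats.ttest_rel(second, third)  # paired t-test
print(f"mean decrease: {mean:.0f} steps/day, 95% CI ({ci[0]:.0f}, {ci[1]:.0f}), P={p:.3f}")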

CONCLUSIONS: The smart wristband with IoT technology was a feasible system for collecting representative data on continuous variables of health parameters during pregnancy. Continuous monitoring provides real-time information between scheduled appointments and thus may help target and tailor pregnancy follow-up.}, } @article {pmid32704048, year = {2020}, author = {Szamosfalvi, B and Yessayan, L}, title = {Innovations in CKRT: individualized therapy with fewer complications.}, journal = {Nature reviews. Nephrology}, volume = {16}, number = {10}, pages = {560-561}, pmid = {32704048}, issn = {1759-507X}, mesh = {Acute Kidney Injury/therapy ; Blood Coagulation Disorders/prevention & control ; Cloud Computing ; Continuous Renal Replacement Therapy/adverse effects/instrumentation/*methods ; Extracorporeal Membrane Oxygenation/instrumentation/methods ; Humans ; Inventions ; Monitoring, Physiologic/methods ; Precision Medicine/adverse effects/instrumentation/methods ; }, abstract = {Continuous kidney replacement therapy (CKRT) can be a lifesaving intervention for critically ill patients; however, mortality remains high. The adaptation of existing innovations, including anti-clotting measures; cloud-computing for optimized treatment prescribing and therapy monitoring; and real-time sensing of blood and/or filter effluent composition to CKRT devices has the potential to enable personalized care and improve the safety and efficacy of this therapy.}, } @article {pmid32679671, year = {2020}, author = {Jabbar, R and Kharbeche, M and Al-Khalifa, K and Krichen, M and Barkaoui, K}, title = {Blockchain for the Internet of Vehicles: A Decentralized IoT Solution for Vehicles Communication Using Ethereum.}, journal = {Sensors (Basel, Switzerland)}, volume = {20}, number = {14}, pages = {}, pmid = {32679671}, issn = {1424-8220}, support = {NPRP8-910-2-387//Qatar National Research Fund/ ; }, abstract = {The concept of smart cities has become prominent in modern metropolises due to the emergence of embedded and connected smart devices, systems, and technologies. They have enabled the connection of every "thing" to the Internet. Therefore, in the upcoming era of the Internet of Things, the Internet of Vehicles (IoV) will play a crucial role in newly developed smart cities. The IoV has the potential to solve various traffic and road safety problems effectively in order to prevent fatal crashes. However, a particular challenge in the IoV, especially in Vehicle-to-Vehicle (V2V) and Vehicle-to-Infrastructure (V2I) communications, is to ensure fast, secure transmission and accurate recording of the data. To overcome these challenges, this work adapts Blockchain technology for real-time applications (RTA) to solve Vehicle-to-Everything (V2X) communication problems. Therefore, the main novelty of this paper is to develop a Blockchain-based IoT system in order to establish secure communication and create an entirely decentralized cloud computing platform. Moreover, the authors qualitatively tested the performance and resilience of the proposed system against common security attacks. Computational tests showed that the proposed solution solved the main challenges of Vehicle-to-X (V2X) communications such as security, centralization, and lack of privacy.
In addition, it guaranteed easy data exchange among the different actors of intelligent transportation systems.}, } @article {pmid32679583, year = {2021}, author = {Onnela, JP}, title = {Opportunities and challenges in the collection and analysis of digital phenotyping data.}, journal = {Neuropsychopharmacology : official publication of the American College of Neuropsychopharmacology}, volume = {46}, number = {1}, pages = {45-54}, pmid = {32679583}, issn = {1740-634X}, mesh = {Data Collection ; *Machine Learning ; Phenotype ; Research Design ; *Smartphone ; }, abstract = {The broad adoption and use of smartphones has led to fundamentally new opportunities for capturing social, behavioral, and cognitive phenotypes in free-living settings, outside of research laboratories and clinics. Predicated on the use of existing personal devices rather than the introduction of additional instrumentation, smartphone-based digital phenotyping presents us with several opportunities and challenges in data collection and data analysis. These two aspects are strongly coupled, because decisions about what data to collect and how to collect it constrain what statistical analyses can be carried out, now and years later, and therefore ultimately determine what scientific, clinical, and public health questions may be asked and answered. Digital phenotyping combines the excitement of fast-paced technologies, smartphones, cloud computing and machine learning, with deep mathematical and statistical questions, and it does this in the service of a better understanding of our own behavior in ways that are objective, scalable, and reproducible. We will discuss some fundamental aspects of collection and analysis of digital phenotyping data, which takes us on a brief tour of several important scientific and technological concepts, from the open-source paradigm to computational complexity, with some unexpected insights provided by fields as varied as zoology and quantum mechanics.}, } @article {pmid32673064, year = {2023}, author = {Mubarakali, A and Durai, AD and Alshehri, M and AlFarraj, O and Ramakrishnan, J and Mavaluru, D}, title = {Fog-Based Delay-Sensitive Data Transmission Algorithm for Data Forwarding and Storage in Cloud Environment for Multimedia Applications.}, journal = {Big data}, volume = {11}, number = {2}, pages = {128-136}, doi = {10.1089/big.2020.0090}, pmid = {32673064}, issn = {2167-647X}, mesh = {*Multimedia ; Algorithms ; Software ; *Internet of Things ; Japan ; }, abstract = {Fog computing is playing a vital role in data transmission to distributed devices in the Internet of Things (IoT) and other network paradigms. The fundamental element of fog computing is an additional layer added between an IoT device/node and a cloud server. These fog nodes are used to speed up time-critical applications. Current research efforts and user trends are pushing for fog computing, and the path is far from being paved. Unless it can reap the benefits of applying software-defined networks and network function virtualization techniques, network monitoring will be an additional burden for fog. However, the seamless integration of these techniques in fog computing is not easy and will be a challenging task. To overcome the issues mentioned above, the fog-based delay-sensitive data transmission algorithm develops a robust, optimal technique to ensure low and predictable delay in delay-sensitive applications such as traffic monitoring and vehicle tracking.
The method reduces latency by storing and processing the data close to the source of information, at an optimal depth in the network. The deployment results show that, compared with conventional methodologies, the proposed algorithm reduces round-trip time by 15.67 ms and average delay by 2 seconds on 10 KB, 100 KB, and 1 MB datasets across the India, Singapore, and Japan Amazon data center regions.}, } @article {pmid32664251, year = {2020}, author = {Slamnik-Kriještorac, N and Silva, EBE and Municio, E and Resende, HCC and Hadiwardoyo, SA and Marquez-Barja, JM}, title = {Network Service and Resource Orchestration: A Feature and Performance Analysis within the MEC-Enhanced Vehicular Network Context.}, journal = {Sensors (Basel, Switzerland)}, volume = {20}, number = {14}, pages = {}, pmid = {32664251}, issn = {1424-8220}, support = {825012//H2020 5G-CARMEN/ ; 723638//FED4FIRE+/ ; }, abstract = {By providing storage and computational resources at the network edge, which enables hosting applications closer to the mobile users, Multi-Access Edge Computing (MEC) uses the mobile backhaul, and the network core more efficiently, thereby reducing the overall latency. Fostering the synergy between 5G and MEC brings ultra-reliable low-latency in data transmission, and paves the way towards numerous latency-sensitive automotive use cases, with the ultimate goal of enabling autonomous driving. Despite the benefits of significant latency reduction, bringing MEC platforms into 5G-based vehicular networks imposes severe challenges towards poorly scalable network management, as MEC platforms usually represent a highly heterogeneous environment. Therefore, there is a strong need to perform network management and orchestration in an automated way, which, being supported by Software Defined Networking (SDN) and Network Function Virtualization (NFV), will further decrease the latency. With recent advances in SDN, along with NFV, which aim to facilitate management automation for tackling delay issues in vehicular communications, we study the closed-loop life-cycle management of network services, and map this cycle to the Management and Orchestration (MANO) systems, such as ETSI NFV MANO. In this paper, we provide a comprehensive overview of existing MANO solutions, studying their most important features to enable network service and resource orchestration in MEC-enhanced vehicular networks. Finally, using a real testbed setup, we conduct and present an extensive performance analysis of Open Baton and Open Source MANO that are, due to their lightweight resource footprint, and compliance to ETSI standards, suitable solutions for resource and service management and orchestration within the network edge.}, } @article {pmid32658738, year = {2020}, author = {Zeng, Y and Zhang, J}, title = {A machine learning model for detecting invasive ductal carcinoma with Google Cloud AutoML Vision.}, journal = {Computers in biology and medicine}, volume = {122}, number = {}, pages = {103861}, doi = {10.1016/j.compbiomed.2020.103861}, pmid = {32658738}, issn = {1879-0534}, mesh = {*Carcinoma, Ductal ; Humans ; *Machine Learning ; Neural Networks, Computer ; }, abstract = {OBJECTIVES: This study aims to assess the feasibility of AutoML technology for the identification of invasive ductal carcinoma (IDC) in whole slide images (WSI).

METHODS: The study presents an experimental machine learning (ML) model based on Google Cloud AutoML Vision instead of a handcrafted neural network. A public dataset of 278,124 labeled histopathology images is used as the original dataset for the model creation. In order to balance the number of positive and negative IDC samples, this study also augments the original public dataset by rotating a large portion of the positive image samples. As a result, a total of 378,215 labeled images are used.
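
The class-balancing step, enlarging the positive class by rotating image samples, can be sketched as follows; the directory names are hypothetical and the authors' exact augmentation settings are not stated in the abstract:

from pathlib import Path
from PIL import Image

src = Path("idc_positive")            # hypothetical folder of IDC-positive patches
dst = Path("idc_positive_augmented")  # rotated copies are written here
dst.mkdir(exist_ok=True)

for img_path in src.glob("*.png"):
    img = Image.open(img_path)
    for angle in (90, 180, 270):
        # Right-angle rotations are lossless and preserve the histology label
        img.rotate(angle, expand=True).save(dst / f"{img_path.stem}_rot{angle}.png")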

RESULTS: A score of 91.6% average accuracy is achieved during the model evaluation as measured by the area under precision-recall curve (AuPRC). A subsequent test on a held-out test dataset (unseen by the model) yields a balanced accuracy of 84.6%. These results outperform the ones reported in the earlier studies. Similar performance is observed from a generalization test with new breast tissue samples we collected from the hospital.

CONCLUSIONS: The results obtained from this study demonstrate the maturity and feasibility of an AutoML approach for IDC identification. The study also shows the advantage of the AutoML approach when combined at scale with cloud computing.}, } @article {pmid32657996, year = {2020}, author = {Wang, SY and Pershing, S and Lee, AY and , }, title = {Big data requirements for artificial intelligence.}, journal = {Current opinion in ophthalmology}, volume = {31}, number = {5}, pages = {318-323}, pmid = {32657996}, issn = {1531-7021}, support = {K23 EY029246/EY/NEI NIH HHS/United States ; P30 EY010572/EY/NEI NIH HHS/United States ; T15 LM007033/LM/NLM NIH HHS/United States ; }, mesh = {Artificial Intelligence/*standards ; *Big Data ; Electronic Health Records ; Humans ; Ophthalmology/*standards ; }, abstract = {PURPOSE OF REVIEW: To summarize how big data and artificial intelligence technologies have evolved, their current state, and next steps to enable future generations of artificial intelligence for ophthalmology.

RECENT FINDINGS: Big data in health care is ever increasing in volume and variety, enabled by the widespread adoption of electronic health records (EHRs) and standards for health data information exchange, such as Digital Imaging and Communications in Medicine and Fast Healthcare Interoperability Resources. Simultaneously, the development of powerful cloud-based storage and computing architectures supports a fertile environment for big data and artificial intelligence in health care. The high volume and velocity of imaging and structured data in ophthalmology is one of the reasons why the field is at the forefront of artificial intelligence research. Still needed are consensus labeling conventions for performing supervised learning on big data, promotion of data sharing and reuse, standards for sharing artificial intelligence model architectures, and access to artificial intelligence models through open application program interfaces (APIs).
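
As a small illustration of the open, standards-based access the review calls for, the snippet below pages through Patient resources on a FHIR R4 REST endpoint; the public HAPI FHIR test server is used here as a stand-in, and any conformant R4 base URL would behave the same way:

import requests

base = "https://hapi.fhir.org/baseR4"  # public test server, used as a stand-in

# FHIR search interactions return a Bundle resource containing matching entries
resp = requests.get(f"{base}/Patient", params={"_count": 5}, timeout=30)
resp.raise_for_status()
bundle = resp.json()

for entry in bundle.get("entry", []):
    patient = entry["resource"]
    print(patient["id"], patient.get("gender", "unknown"), patient.get("birthDate", "n/a"))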

SUMMARY: Future requirements for big data and artificial intelligence include fostering reproducible science, continuing open innovation, and supporting the clinical use of artificial intelligence by promoting standards for data labels, data sharing, artificial intelligence model architecture sharing, and accessible code and APIs.}, } @article {pmid32646548, year = {2020}, author = {Jiang, W and Guo, L and Wu, H and Ying, J and Yang, Z and Wei, B and Pan, F and Han, Y}, title = {Use of a smartphone for imaging, modelling, and evaluation of keloids.}, journal = {Burns : journal of the International Society for Burn Injuries}, volume = {46}, number = {8}, pages = {1896-1902}, doi = {10.1016/j.burns.2020.05.026}, pmid = {32646548}, issn = {1879-1409}, mesh = {Adult ; Burns/complications/diagnostic imaging ; China ; Female ; Humans ; Imaging, Three-Dimensional/methods/*standards/statistics & numerical data ; Keloid/*diagnostic imaging ; Male ; Middle Aged ; Reproducibility of Results ; Smartphone/instrumentation/*standards/statistics & numerical data ; }, abstract = {OBJECTIVE: We used a smartphone to construct three-dimensional (3D) models of keloids, then quantitatively simulate and evaluate these tissues.

METHODS: We uploaded smartphone photographs of 33 keloids on the chest, shoulder, neck, limbs, or abdomen of 28 patients. We used the parallel computing power of a graphics processing unit to calculate the spatial coordinates of each pixel in the point cloud, then generated 3D models. We obtained the longest diameter, thickness, and volume of each keloid, then compared these data to findings obtained by traditional methods.

RESULTS: Measurement repeatability was excellent: intraclass correlation coefficients were 0.998 for longest diameter, 0.978 for thickness, and 0.993 for volume. When measuring the longest diameter and volume, the results agreed with Vernier caliper measurements and with measurements obtained after the injection of water into the cavity. When measuring thickness, the findings were similar to those obtained by ultrasound. Bland-Altman analyses showed that the ratios of 95% confidence interval extremes were 3.03% for longest diameter, 3.03% for volume, and 6.06% for thickness.
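
The Bland-Altman agreement reported above reduces to a bias and 95% limits of agreement computed from paired differences; a minimal numpy sketch on invented thickness measurements (not the study's data) is:

import numpy as np

# Invented paired keloid thickness measurements (mm): 3D model vs ultrasound
model = np.array([4.1, 5.0, 3.2, 6.4, 4.8, 5.5, 2.9, 4.4])
ultrasound = np.array([4.3, 4.8, 3.5, 6.1, 5.0, 5.3, 3.1, 4.2])

diff = model - ultrasound
bias = diff.mean()
loa = 1.96 * diff.std(ddof=1)  # half-width of the 95% limits of agreement
outside = np.mean(np.abs(diff - bias) > loa) * 100
print(f"bias={bias:.2f} mm, limits of agreement ({bias - loa:.2f}, {bias + loa:.2f}), "
      f"{outside:.1f}% of points outside")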

CONCLUSION: Smartphones were used to acquire data that was then employed to construct 3D models of keloids; these models yielded quantitative data with excellent reliability and validity. The smartphone can serve as an additional tool for keloid diagnosis and research, and will facilitate medical treatment over the internet.}, } @article {pmid32637027, year = {2019}, author = {Tilton, JC and Wolfe, RE and Lin, GG and Dellomo, JJ}, title = {On-Orbit Measurement of the Effective Focal Length and Band-to-Band Registration of Satellite-Borne Whiskbroom Imaging Sensors.}, journal = {IEEE journal of selected topics in applied earth observations and remote sensing}, volume = {12}, number = {11}, pages = {4622-4633}, pmid = {32637027}, issn = {1939-1404}, support = {/SCMD-EarthScienceSystem/Science Earth Science System NASA/United States ; }, abstract = {We have developed an approach for the measurement of the Effective Focal Length (EFL) and Band-to-Band Registration (BBR) of selected spectral bands of satellite-borne whiskbroom imaging sensors from on-orbit data. Our approach is based on simulating the coarser spatial resolution whiskbroom sensor data with finer spatial resolution Landsat 7 ETM+ or Landsat 8 OLI data using the geolocation (Earth location) information from each sensor, and computing the correlation between the simulated and original data. For each scan of a selected spectral band of the whiskbroom data set, various subsets of the data are examined to find the subset with the highest spatial correlation between the original and simulated data using the nominal geolocation information. Then, for this best subset, the focal length value and the spatial shift are varied to find the values that produce the highest spatial correlation between the original and simulated data. This best focal length value is taken to be the measured instrument EFL and the best spatial shift is taken to be the registration of the whiskbroom data relative to the Landsat data, from which the BBR is inferred. Best results are obtained with cloud-free subsets with contrasting land features. This measurement is repeated over other scans with cloud-free subsets. We demonstrate our approach with on-orbit data from the Aqua and Terra MODIS instruments and SNPP and J1 VIIRS instruments.}, } @article {pmid32636922, year = {2020}, author = {Di Gennaro, SF and Matese, A}, title = {Evaluation of novel precision viticulture tool for canopy biomass estimation and missing plant detection based on 2.5D and 3D approaches using RGB images acquired by UAV platform.}, journal = {Plant methods}, volume = {16}, number = {}, pages = {91}, pmid = {32636922}, issn = {1746-4811}, abstract = {BACKGROUND: The knowledge of vine vegetative status within a vineyard plays a key role in canopy management in order to achieve a correct vine balance and reach the final desired yield/quality. Detailed information about canopy architecture and missing plant distribution provides useful support for farmers/winegrowers to optimize canopy management practices and the replanting process, respectively. In the last decade, there has been a progressive diffusion of UAV (Unmanned Aerial Vehicles) technologies for Precision Viticulture purposes, as fast and accurate methodologies for assessing the spatial variability of geometric plant parameters. The aim of this study was to implement an unsupervised and integrated procedure of biomass estimation and missing plant detection, using both the 2.5D-surface and 3D-alphashape methods.

RESULTS: Both methods showed good overall accuracy with respect to ground-truth biomass measurements, with high R[2] values (0.71 and 0.80 for 2.5D and 3D, respectively). The 2.5D method led to an overestimation, since it treats the vine as a rectangular cuboid. By contrast, the 3D method provided more accurate results thanks to the alphashape algorithm, which is capable of detecting each single shoot and the holes within the canopy. Regarding missing plant detection, the 3D approach again performed better in cases where plants were hidden by the shoots of adjacent plants or where a sparse canopy left empty spaces along the row; here the 2.5D method, which flags row sections thinner than a threshold (0.10 m), tended to return false negatives and false positives, respectively.
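
To make the 2.5D/3D contrast concrete: the 2.5D estimate effectively fills every ground cell as a solid column up to the canopy top, which is why it overestimates relative to an alphashape that can carve out holes. Below is a small numpy sketch of the 2.5D computation on a synthetic point cloud; the grid size and row dimensions are arbitrary, not the paper's settings:

import numpy as np

def canopy_volume_25d(points, cell=0.05):
    """2.5D estimate: rasterize the point cloud onto a ground grid and sum
    cell_area x max canopy height per cell (treats the vine as solid columns)."""
    x, y, z = points.T
    ix = ((x - x.min()) / cell).astype(int)
    iy = ((y - y.min()) / cell).astype(int)
    top = np.zeros((ix.max() + 1, iy.max() + 1))
    np.maximum.at(top, (ix, iy), z)  # per-cell canopy top height
    return top.sum() * cell * cell   # sum of column volumes

# Synthetic vine-row cloud (metres), standing in for UAV photogrammetry output
rng = np.random.default_rng(0)
pts = rng.uniform(low=[0.0, 0.0, 0.8], high=[2.0, 0.4, 1.8], size=(5000, 3))
print(f"2.5D canopy volume ~ {canopy_volume_25d(pts):.2f} m^3")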

CONCLUSIONS: This paper describes a rapid and objective tool for the farmer to promptly identify canopy management strategies and drive replanting decisions. The 3D approach provided results closer to the real canopy volume and higher performance in missing plant detection. However, the dense-point-cloud analysis required more processing time. Looking ahead, given the continuous evolution of computing performance, overcoming the current limitation posed by the pre- and post-processing of large image datasets should mainstream this methodology.}, } @article {pmid32635632, year = {2020}, author = {Pastor-Vargas, R and Tobarra, L and Robles-Gómez, A and Martin, S and Hernández, R and Cano, J}, title = {A WoT Platform for Supporting Full-Cycle IoT Solutions from Edge to Cloud Infrastructures: A Practical Case.}, journal = {Sensors (Basel, Switzerland)}, volume = {20}, number = {13}, pages = {}, pmid = {32635632}, issn = {1424-8220}, support = {eNMoLabs research project//Universidad Nacional de Educación a Distancia/ ; }, abstract = {Internet of Things (IoT) learning involves the acquisition of transversal skills ranging from the development based on IoT devices and sensors (edge computing) to the connection of the devices themselves to management environments that allow the storage and processing (cloud computing) of data generated by sensors. The usual development cycle for IoT applications consists of the following three stages: stage 1 corresponds to the description of the devices and basic interaction with sensors. In stage 2, data acquired by the devices/sensors are employed by communication models from the origin edge to the management middleware in the cloud. Finally, stage 3 focuses on processing and presentation models. These models present the most relevant indicators for IoT devices and sensors. Students must acquire all the necessary skills and abilities to understand and develop these types of applications, so lecturers need an infrastructure to enable the learning of full IoT application development. A Web of Things (WoT) platform named Labs of Things at UNED (LoT@UNED) has been used for this goal. This paper shows the fundamentals and features of this infrastructure, and how the different phases of the full development cycle of solutions in IoT environments are implemented using LoT@UNED. The proposed system has been tested in several computer science subjects. Students can perform remote experimentation with a collaborative WoT learning environment in the cloud, including the possibility to analyze the generated data by IoT sensors.}, } @article {pmid32635561, year = {2020}, author = {Lavysh, D and Neu-Yilik, G}, title = {UPF1-Mediated RNA Decay-Danse Macabre in a Cloud.}, journal = {Biomolecules}, volume = {10}, number = {7}, pages = {}, pmid = {32635561}, issn = {2218-273X}, support = {NE 593/2-1 , NE 593/2-2//Deutsche Forschungsgemeinschaft/International ; }, mesh = {Animals ; Fungal Proteins/metabolism ; Humans ; Nonsense Mediated mRNA Decay ; RNA Helicases/*metabolism ; RNA, Messenger/*chemistry ; Trans-Activators/*metabolism ; Yeasts/*metabolism ; }, abstract = {Nonsense-mediated RNA decay (NMD) is the prototype example of a whole family of RNA decay pathways that unfold around a common central effector protein called UPF1.
While NMD in yeast appears to be a linear pathway, NMD in higher eukaryotes is a multifaceted phenomenon with high variability with respect to substrate RNAs, degradation efficiency, effector proteins and decay-triggering RNA features. Despite increasing knowledge of the mechanistic details, it seems ever more difficult to define NMD and to clearly distinguish it from a growing list of other UPF1-mediated RNA decay pathways (UMDs). With a focus on mammalian NMD, we critically examine the prevailing NMD models and the gaps and inconsistencies in these models. By exploring the minimal requirements for NMD and other UMDs, we try to elucidate whether they are separate and definable pathways, or rather variations of the same phenomenon. Finally, we suggest that the operating principle of the UPF1-mediated decay family could be considered similar to that of a computing cloud providing a flexible infrastructure with rapid elasticity and dynamic access according to specific user needs.}, } @article {pmid32611428, year = {2020}, author = {Hyder, A and May, AA}, title = {Translational data analytics in exposure science and environmental health: a citizen science approach with high school students.}, journal = {Environmental health : a global access science source}, volume = {19}, number = {1}, pages = {73}, pmid = {32611428}, issn = {1476-069X}, support = {1645226//Division of Chemical, Bioengineering, Environmental, and Transport Systems/International ; }, mesh = {Adolescent ; Air Pollution/analysis ; Citizen Science/*organization & administration ; Data Science/*methods/organization & administration ; *Environmental Exposure ; Environmental Health/*methods ; Environmental Monitoring/methods ; Humans ; Schools ; Students ; }, abstract = {BACKGROUND: Translational data analytics aims to apply data analytics principles and techniques to bring about broader societal or human impact. Translational data analytics for environmental health is an emerging discipline, and the objective of this study is to describe a real-world example of this discipline.

METHODS: We implemented a citizen-science project at a local high school. Multiple cohorts of citizen scientists, who were students, fabricated and deployed low-cost air quality sensors. A cloud-computing solution provided real-time air quality data for risk screening purposes, data analytics and curricular activities.

RESULTS: The citizen-science project engaged with 14 high school students over a four-year period that is continuing to this day. The project led to the development of a website that displayed sensor-based measurements in local neighborhoods and a GitHub-like repository for open source code and instructions. Preliminary results showed a reasonable comparison between sensor-based and EPA land-based federal reference monitor data for CO and NOx.
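
The sensor-versus-reference comparison above is, at its core, a correlation and calibration fit between co-located time series; a toy sketch with invented CO readings (not the project's data):

import numpy as np

# Invented hourly CO readings (ppm): low-cost sensor vs federal reference monitor (FRM)
sensor = np.array([0.41, 0.38, 0.55, 0.62, 0.47, 0.50, 0.44, 0.58])
frm = np.array([0.39, 0.40, 0.52, 0.65, 0.45, 0.49, 0.46, 0.60])

r = np.corrcoef(sensor, frm)[0, 1]             # agreement between the series
slope, intercept = np.polyfit(frm, sensor, 1)  # simple linear calibration
print(f"Pearson r = {r:.2f}; sensor ~ {slope:.2f} * FRM + {intercept:.2f}")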

CONCLUSIONS: Initial sensor-based data collection efforts showed reasonable agreement with land-based federal reference monitors, but more work needs to be done to validate these results. Lessons learned were: 1) the need for sustained funding, because citizen-science project timelines are a function of community needs/capacity and of building interdisciplinary rapport in academic settings, and 2) the need for dedicated staff to manage academic-community relationships.}, } @article {pmid32598292, year = {2020}, author = {Saleh, N and Abo Agyla, A}, title = {An integrated assessment system for the accreditation of medical laboratories.}, journal = {Biomedizinische Technik. Biomedical engineering}, volume = {}, number = {}, pages = {}, doi = {10.1515/bmt-2019-0133}, pmid = {32598292}, issn = {1862-278X}, abstract = {Medical laboratory accreditation has become a trend, as it establishes that a laboratory can be trusted for the diagnosis of diseases. It is performed at regular intervals to assure the competence of quality management systems (QMS) against pre-defined standards. However, few attempts have been made to assess the quality level of medical laboratory services, and no realistic study classifies and analyzes laboratory performance based on a computational model. The purpose of this study was to develop an integrated system for medical laboratory accreditation that assesses the QMS against ISO 15189, together with a deep analysis of the factors that sustain accreditation. The system starts by establishing a core matrix that maps QMS elements to ISO 15189 clauses. From this map, a questionnaire was developed to measure performance, and score indices were calculated for the QMS. A fuzzy logic model was designed based on the calculated scores to classify medical laboratories according to their readiness for accreditation. Further, in the case of accreditation failure, cause-and-effect root analysis is performed to identify the causes. Finally, cloud computing principles were employed to launch a web application that facilitates user interaction with the proposed system. For verification, the system was tested using a dataset of 12 medical laboratories in Egypt; the results demonstrated its robustness and consistency. Thus, the system can serve as a self-assessment tool that reveals points of weakness and strength.}, } @article {pmid32570956, year = {2020}, author = {Ogiela, L and Ogiela, MR and Ko, H}, title = {Intelligent Data Management and Security in Cloud Computing.}, journal = {Sensors (Basel, Switzerland)}, volume = {20}, number = {12}, pages = {}, pmid = {32570956}, issn = {1424-8220}, support = {DEC-2016/23/B/HS4/00616//Narodowym Centrum Nauki/ ; No 16.16.120.773//Akademia Górniczo-Hutnicza im. Stanislawa Staszica/ ; No. 2017R1A6A1A03015496//National Research Foundation of Korea/ ; }, abstract = {This paper will present the authors' own techniques of secret data management and protection, with particular attention paid to techniques securing data services. Among the solutions discussed, there will be information-sharing protocols dedicated to the tasks of secret (confidential) data sharing. Such solutions will be presented in an algorithmic form, aimed at solving the tasks of protecting and securing data against unauthorized acquisition. Data-sharing protocols will execute the tasks of securing a special type of information, i.e., data services.
Data protection will be defined at various levels, within which the tasks of data management and protection will be executed. The authors' solution for securing data, which uses cryptographic threshold techniques to split the secret among a specified group of secret trustees, enhanced by the application of linguistic methods for describing the shared secret, forms a new class of protocols, i.e., intelligent linguistic threshold schemes. The solutions presented in this paper for service management and security are dedicated to various levels of data management. These levels can be differentiated both within the structure of a given entity and in its environment; a special example is cloud management processes, for which the feasibility of applying the discussed protocols will also be assessed. The presented solutions are based on an innovative approach in which a special formal graph is used to create a representation of the secret, which can then be divided and transmitted over a distributed network.}, } @article {pmid32570566, year = {2020}, author = {Coman Schmid, D and Crameri, K and Oesterle, S and Rinn, B and Sengstag, T and Stockinger, H and , }, title = {SPHN - The BioMedIT Network: A Secure IT Platform for Research with Sensitive Human Data.}, journal = {Studies in health technology and informatics}, volume = {270}, number = {}, pages = {1170-1174}, doi = {10.3233/SHTI200348}, pmid = {32570566}, issn = {1879-8365}, mesh = {Big Data ; Cloud Computing ; Computer Security ; *Information Storage and Retrieval ; Privacy ; }, abstract = {The BioMedIT project is funded by the Swiss government as an integral part of the Swiss Personalized Health Network (SPHN), aiming to provide researchers with access to a secure, powerful and versatile IT infrastructure for doing data-driven research on sensitive biomedical data while ensuring data privacy protection. The BioMedIT network gives researchers the ability to securely transfer, store, manage and process sensitive research data. The underlying BioMedIT nodes provide compute and storage capacity that can be used locally or through a federated environment. The network operates under a common Information Security Policy using state-of-the-art security techniques. It utilizes cloud computing, virtualization, compute accelerators (GPUs), big data storage as well as federation technologies to lower computational boundaries for researchers and to guarantee that sensitive data can be processed in a secure and lawful way.
Building on existing expertise and research infrastructure at the partnering Swiss institutions, the BioMedIT network establishes a competitive Swiss private-cloud - a secure national infrastructure resource that can be used by researchers of Swiss universities, hospitals and other research institutions.}, } @article {pmid32570417, year = {2020}, author = {Niyitegeka, D and Bellafqira, R and Genin, E and Coatrieux, G}, title = {Secure Collapsing Method Based on Fully Homomorphic Encryption.}, journal = {Studies in health technology and informatics}, volume = {270}, number = {}, pages = {412-416}, doi = {10.3233/SHTI200193}, pmid = {32570417}, issn = {1879-8365}, mesh = {Algorithms ; Cloud Computing ; *Computer Security ; Female ; Genome-Wide Association Study ; Genomics ; Humans ; Logistic Models ; Male ; Privacy ; }, abstract = {In this paper, we propose a new approach for performing privacy-preserving genome-wide association study (GWAS) in cloud environments. This method allows a Genomic Research Unit (GRU) who possesses genetic variants of diseased individuals (cases) to compare his/her data against genetic variants of healthy individuals (controls) from a Genomic Research Center (GRC). The originality of this work stands on a secure version of the collapsing method based on the logistic regression model considering that all data of GRU are stored into the cloud. To do so, we take advantage of fully homomorphic encryption and of secure multiparty computation. Experiment results carried out on real genetic data using the BGV cryptosystem indicate that the proposed scheme provides the same results as the ones achieved on clear data.}, } @article {pmid32562490, year = {2020}, author = {Lawlor, B and Sleator, RD}, title = {The democratization of bioinformatics: A software engineering perspective.}, journal = {GigaScience}, volume = {9}, number = {6}, pages = {}, pmid = {32562490}, issn = {2047-217X}, mesh = {Cloud Computing ; Computational Biology/*methods ; Genomics/methods ; *Software ; }, abstract = {Today, thanks to advances in cloud computing, it is possible for small teams of software developers to produce internet-scale products, a feat that was previously the preserve of large organizations. Herein, we describe how these advances in software engineering can be made more readily available to bioinformaticians. In the same way that cloud computing has democratized access to distributed systems engineering for generalist software engineers, access to scalable and reproducible bioinformatic engineering can be democratized for generalist bioinformaticians and biologists. We present solutions, based on our own efforts, to achieve this goal.}, } @article {pmid32560653, year = {2020}, author = {Ehwerhemuepha, L and Gasperino, G and Bischoff, N and Taraman, S and Chang, A and Feaster, W}, title = {HealtheDataLab - a cloud computing solution for data science and advanced analytics in healthcare with application to predicting multi-center pediatric readmissions.}, journal = {BMC medical informatics and decision making}, volume = {20}, number = {1}, pages = {115}, pmid = {32560653}, issn = {1472-6947}, mesh = {Child ; Child, Preschool ; *Cloud Computing ; *Data Science ; Delivery of Health Care ; Female ; Humans ; Infant ; Infant, Newborn ; Male ; Patient Readmission ; Solutions ; }, abstract = {BACKGROUND: There is a shortage of medical informatics and data science platforms using cloud computing on electronic medical record (EMR) data, and with computing capacity for analyzing big data. 
We implemented, described, and applied a cloud computing solution utilizing the Fast Healthcare Interoperability Resources (FHIR) standard and a state-of-the-art parallel distributed computing platform for advanced analytics.
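As a concrete illustration of the FHIR standard just mentioned, the sketch below retrieves Patient resources over FHIR's REST API; the public HAPI test server URL and the search parameters are assumptions for demonstration, not part of the HealtheDataLab deployment.

```python
# Minimal FHIR REST query: fetch Patient resources as a JSON searchset Bundle.
# The base URL is a public test server, assumed here purely for illustration.
import requests

BASE = "https://hapi.fhir.org/baseR4"  # assumed demo endpoint

resp = requests.get(
    f"{BASE}/Patient",
    params={"birthdate": "gt2005-01-01", "_count": 10},  # example criteria
    headers={"Accept": "application/fhir+json"},
    timeout=30,
)
resp.raise_for_status()
bundle = resp.json()
for entry in bundle.get("entry", []):
    patient = entry["resource"]
    print(patient["id"], patient.get("birthDate"))
```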

METHODS: We utilized the architecture of the modern predictive analytics platform called Cerner® HealtheDataLab and described the suite of cloud computing services and Apache Projects that it relies on. We validated the platform by replicating and improving on a previous single pediatric institution study/model on readmission and developing a multi-center model of all-cause readmission for pediatric-age patients using the Cerner® Health Facts Deidentified Database (now updated and referred to as the Cerner Real World Data). Based on a priori inclusion criteria, we retrieved a subset of 1.4 million pediatric encounters drawn from 48 hospitals' data in the database. We built and analyzed corresponding random forest and multilayer perceptron (MLP) neural network models using HealtheDataLab.
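A minimal, schematic version of this modeling step, using scikit-learn in place of the HealtheDataLab/Spark stack; the feature matrix and labels are hypothetical placeholders rather than the study's encounter data.

```python
# Schematic readmission modeling: random forest and MLP on a feature table.
# X and y stand in for engineered encounter features and readmission labels.
import numpy as np
from sklearn.ensemble import RandomForestClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import roc_auc_score

rng = np.random.default_rng(0)
X = rng.normal(size=(5000, 20))        # hypothetical encounter features
y = rng.integers(0, 2, size=5000)      # hypothetical readmission labels

X_tr, X_te, y_tr, y_te = train_test_split(X, y, test_size=0.2, random_state=0)

for model in (RandomForestClassifier(n_estimators=200, random_state=0),
              MLPClassifier(hidden_layer_sizes=(64, 32), max_iter=300,
                            random_state=0)):
    model.fit(X_tr, y_tr)
    auc = roc_auc_score(y_te, model.predict_proba(X_te)[:, 1])
    print(type(model).__name__, round(auc, 4))
```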

RESULTS: Using the HealtheDataLab platform, we developed a random forest model and a multilayer perceptron model with AUCs of 0.8446 (0.8444, 0.8447) and 0.8451 (0.8449, 0.8453), respectively. We showed the distribution of model performance across hospitals and identified a set of novel variables, relating to previous resource utilization and generic medications, that may be used to improve existing readmission models.

CONCLUSION: Our results suggest that high-performance, elastic cloud computing infrastructures such as the platform presented here can be used for the development of highly predictive models on EMR data in a secure and robust environment. This in turn can lead to new clinical insights/discoveries.}, } @article {pmid32557071, year = {2020}, author = {Deep, B and Mathur, I and Joshi, N}, title = {Coalescing IoT and Wi-Fi technologies for an optimized approach in urban route planning.}, journal = {Environmental science and pollution research international}, volume = {27}, number = {27}, pages = {34434-34441}, pmid = {32557071}, issn = {1614-7499}, mesh = {Air Pollutants/*analysis ; Air Pollution/*analysis ; Environmental Monitoring ; India ; Nitrogen Dioxide/analysis ; Particulate Matter/analysis ; }, abstract = {The quality of the air we breathe is one of the more serious environmental challenges that governments face around the world, and a matter of concern for almost all developed and developing countries. The National Air Quality Index (NAQI) in India was first initiated and unveiled by the central government under the Swachh Bharat Abhiyan (Clean India Campaign), launched to promote cleanliness and to raise awareness among all citizens living in India of working towards a clean and healthy environment. This index is computed from values obtained by monitoring eight types of pollutants that are known to commonly permeate our immediate environment: particulate matter PM10; particulate matter PM2.5; nitrogen dioxide; sulfur dioxide; carbon monoxide; lead; ammonia; and ozone. Studies have shown that almost 90% of particulate matter is produced by vehicular emissions, dust and debris on roads, industry, and construction sites spanning rural, semi-urban, and urban areas. While the State and Central governments have devised and implemented several schemes to keep air pollution levels under control, these alone have proved inadequate in cases such as the Delhi region of India. The Internet of Things (IoT) offers a range of options that extend into the domain of environmental management. Using an online monitoring system based on IoT technologies, users can stay informed about fluctuating levels of air pollution. In this paper, the design of a low-cost pollution measurement kit built around a dust sensor, capable of transmitting data to a cloud service through a Wi-Fi module, is described (a toy sketch of this sensor-to-cloud path follows this entry). A system overview of urban route planning is also proposed. The proposed model can make users aware of pollutant concentrations at any point in time and can also serve as useful input for the design of a least-polluted-path prediction app. Hence, the proposed model can help travelers plan a less polluted route in urban areas.}, } @article {pmid32545495, year = {2020}, author = {Lee, D and Moon, H and Oh, S and Park, D}, title = {mIoT: Metamorphic IoT Platform for On-Demand Hardware Replacement in Large-Scaled IoT Applications.}, journal = {Sensors (Basel, Switzerland)}, volume = {20}, number = {12}, pages = {}, pmid = {32545495}, issn = {1424-8220}, support = {NRF2019R1A2C2005099//Basic Science Research Program through the National Research Foundation of Korea (NRF) funded by the Ministry of Science and ICT/ ; }, abstract = {As the Internet of Things (IoT) is becoming more pervasive in our daily lives, the number of devices that connect to IoT edges and data generated at the edges are rapidly increasing.
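A minimal sketch of the sensor-to-cloud path described in the route-planning entry above: read a dust value over serial and POST it to a cloud endpoint over Wi-Fi. The endpoint URL, the payload fields, and the sensor's serial output format are all hypothetical.

```python
# Illustrative IoT upload loop: read a PM value from a serial-attached dust
# sensor and push it to a cloud HTTP endpoint. All names here are assumed.
import json
import time

import requests
import serial  # pyserial

CLOUD_URL = "https://example.org/api/pm"  # hypothetical ingestion endpoint

sensor = serial.Serial("/dev/ttyUSB0", 9600, timeout=2)  # assumed wiring

while True:
    line = sensor.readline().decode(errors="ignore").strip()
    if line:
        reading = {"pm10_ugm3": float(line), "ts": time.time()}  # assumed format
        requests.post(CLOUD_URL, data=json.dumps(reading),
                      headers={"Content-Type": "application/json"}, timeout=10)
    time.sleep(60)  # one reading per minute
```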
On account of the bottlenecks in servers, due to the increase in data, as well as security and privacy issues, the IoT paradigm has shifted from cloud computing to edge computing. Pursuant to this trend, embedded devices require complex computation capabilities. However, due to various constraints, edge devices cannot carry enough hardware to process all the data, and their fixed hardware functions limit operational flexibility relative to cloud computing. Recently, as application fields and collected data types diversify and, in particular, as applications requiring complex computation such as artificial intelligence (AI) and signal processing are deployed at the edge, flexible processing and computation capabilities based on hardware acceleration are required. In this paper, to meet these needs, we propose a new IoT platform, called the metamorphic IoT (mIoT) platform, which can provide various hardware accelerations with limited hardware platform resources through the on-demand transmission and reconfiguration of the required hardware at the edge, instead of transferring sensing data to a server. The proposed platform reconfigures the edge's hardware with minimal overhead, based on a probabilistic value known as callability. The mIoT consists of reconfigurable edge devices based on the RISC-V architecture and a server that manages the reconfiguration of edge devices based on callability. Through various experimental results, we confirmed that the callability-based mIoT platform can provide the hardware required by the edge device in real time. In addition, by performing various functions with small hardware, power consumption, which is a major constraint of IoT, can be reduced.}, } @article {pmid32540846, year = {2020}, author = {Suver, C and Thorogood, A and Doerr, M and Wilbanks, J and Knoppers, B}, title = {Bringing Code to Data: Do Not Forget Governance.}, journal = {Journal of medical Internet research}, volume = {22}, number = {7}, pages = {e18087}, pmid = {32540846}, issn = {1438-8871}, support = {//CIHR/Canada ; }, mesh = {Biomedical Research/*methods ; Cloud Computing/*standards ; Humans ; Information Dissemination/*methods ; Reproducibility of Results ; }, abstract = {Developing or independently evaluating algorithms in biomedical research is difficult because of restrictions on access to clinical data. Access is restricted because of privacy concerns, the proprietary treatment of data by institutions (fueled in part by the cost of data hosting, curation, and distribution), concerns over misuse, and the complexities of applicable regulatory frameworks. The use of cloud technology and services can address many of the barriers to data sharing. For example, researchers can access data in high performance, secure, and auditable cloud computing environments without the need for copying or downloading. An alternative path to accessing data sets requiring additional protection is the model-to-data approach. In model-to-data, researchers submit algorithms to run on secure data sets that remain hidden. Model-to-data is designed to enhance security and local control while enabling communities of researchers to generate new knowledge from sequestered data. Model-to-data has not yet been widely implemented, but pilots have demonstrated its utility when technical or legal constraints preclude other methods of sharing. We argue that model-to-data can make a valuable addition to our data sharing arsenal, with 2 caveats.
First, model-to-data should only be adopted where necessary to supplement rather than replace existing data-sharing approaches given that it requires significant resource commitments from data stewards and limits scientific freedom, reproducibility, and scalability. Second, although model-to-data reduces concerns over data privacy and loss of local control when sharing clinical data, it is not an ethical panacea. Data stewards will remain hesitant to adopt model-to-data approaches without guidance on how to do so responsibly. To address this gap, we explored how commitments to open science, reproducibility, security, respect for data subjects, and research ethics oversight must be re-evaluated in a model-to-data context.}, } @article {pmid32540775, year = {2020}, author = {Margheri, A and Masi, M and Miladi, A and Sassone, V and Rosenzweig, J}, title = {Decentralised provenance for healthcare data.}, journal = {International journal of medical informatics}, volume = {141}, number = {}, pages = {104197}, doi = {10.1016/j.ijmedinf.2020.104197}, pmid = {32540775}, issn = {1872-8243}, mesh = {Delivery of Health Care ; *Electronic Health Records ; Health Facilities ; *Health Level Seven ; Humans ; Information Storage and Retrieval ; }, abstract = {OBJECTIVE: The creation and exchange of patients' Electronic Healthcare Records have developed significantly in the last decade. Patients' records are however distributed in data silos across multiple healthcare facilities, posing technical and clinical challenges that may endanger patients' safety. Current healthcare sharing systems ensure interoperability of patients' records across facilities, but they have limits in presenting doctors with the clinical context of the data in the records. We design and implement a platform for managing provenance tracking of Electronic Healthcare Records based on blockchain technology, compliant with the latest healthcare standards and following the patient-informed consent preferences.

METHODS: The platform leverages two pillars: the use of international standards such as Integrating the Healthcare Enterprise (IHE), Health Level Seven International (HL7) and Fast Healthcare Interoperability Resources (FHIR) to achieve interoperability, and the use of a provenance creation process that, by design, avoids storing personal data within the blockchain. The platform consists of: (1) a smart contract implemented within the Hyperledger Fabric blockchain that manages provenance according to W3C PROV for medical documents in standardised formats (e.g. a CDA document, a FHIR resource, a DICOM study, etc.); (2) a Java Proxy that intercepts all the document submissions and retrievals for which provenance shall be evaluated; (3) a service used to retrieve the PROV document.
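For illustration, the W3C PROV bookkeeping this platform builds on can be sketched with the Python prov package; the namespace, identifiers, and document type below are invented for the example, not taken from the platform.

```python
# Minimal W3C PROV document: a CDA document generated by a submission activity
# and attributed to a clinician. Identifiers and the namespace are hypothetical.
from prov.model import ProvDocument

doc = ProvDocument()
doc.add_namespace("ex", "http://example.org/ehr#")

doc.entity("ex:cda-document-1", {"prov:type": "ex:CDADocument"})
doc.activity("ex:submission-1")
doc.agent("ex:dr-smith")
doc.wasGeneratedBy("ex:cda-document-1", "ex:submission-1")
doc.wasAssociatedWith("ex:submission-1", "ex:dr-smith")

print(doc.get_provn())  # human-readable PROV-N serialization
```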

RESULTS: We integrated our decentralised platform with the SpiritEHR engine, an enterprise-grade healthcare system, and we stored and retrieved the available documents in Mandel's sample CDA repository [1], which contained no protected health information. Using a cloud-based blockchain solution, we observed that the overhead added to the typical processing time of reading and writing medical data is on the order of milliseconds. Moreover, the integration of the Proxy at the level of exchanged messages in EHR systems allows transparent usage of provenance data in multiple health computing domains such as decision making, data reconciliation, and patient consent auditing.

CONCLUSIONS: By using international healthcare standards and a cloud-based blockchain deployment, we delivered a solution that can manage provenance of patients' records via transparent integration within the routine operations on healthcare data.}, } @article {pmid32537483, year = {2020}, author = {Ben Hassen, H and Ayari, N and Hamdi, B}, title = {A home hospitalization system based on the Internet of things, Fog computing and cloud computing.}, journal = {Informatics in medicine unlocked}, volume = {20}, number = {}, pages = {100368}, pmid = {32537483}, issn = {2352-9148}, abstract = {In recent years, the world has witnessed a significant increase in the number of elderly people who often suffer from chronic diseases, and in recent months a major spread of the new coronavirus (COVID-19), which has led to thousands of deaths, especially among the elderly and people with chronic diseases. The coronavirus has also caused many problems in hospitals, which are no longer able to accommodate the large number of patients. The virus has also begun to spread among medical and paramedical teams, posing a major risk to the health of patients staying in hospitals. To reduce the spread of the virus and protect the health of patients who would otherwise need a hospital stay, home hospitalization is one of the best possible solutions. This paper proposes a home hospitalization system based on the Internet of Things (IoT), Fog computing, and Cloud computing, which are among the technologies that have contributed most significantly to the development of the healthcare sector. Such systems allow patients to recover and receive treatment at home among their families: the patient's health and the environmental state of the hospitalization room are monitored, enabling doctors to follow the hospitalization process and make recommendations to patients and their supervisors through monitoring units and mobile applications developed for this purpose. The evaluation results showed strong acceptance of this system by patients and doctors alike (a toy publisher sketch follows this entry).}, } @article {pmid32535840, year = {2020}, author = {Kurzawski, JW and Mikellidou, K and Morrone, MC and Pestilli, F}, title = {The visual white matter connecting human area prostriata and the thalamus is retinotopically organized.}, journal = {Brain structure & function}, volume = {225}, number = {6}, pages = {1839-1853}, pmid = {32535840}, issn = {1863-2661}, support = {797603//H2020 Marie Skłodowska-Curie Actions/ ; 641805//H2020 Marie Skłodowska-Curie Actions/ ; 1636893//Directorate for Computer and Information Science and Engineering/ ; 1734853//Directorate for Social, Behavioral and Economic Sciences/ ; UL1TR002529//National Center for Advancing Translational Sciences/ ; 832813//H2020 European Research Council/ ; 1916518//National Science Foundation/ ; 2017SBCPZY_02//Ministero dell'Istruzione, dell'Università e della Ricerca/ ; ULTTR001108//National Institute of Mental Health/ ; }, mesh = {Connectome ; Diffusion Magnetic Resonance Imaging ; Geniculate Bodies/anatomy & histology ; Humans ; Occipital Lobe/anatomy & histology ; Thalamus/*anatomy & histology ; Visual Cortex/*anatomy & histology ; Visual Pathways/anatomy & histology ; White Matter/*anatomy & histology ; }, abstract = {The human visual system is capable of processing visual information from fovea to the far peripheral visual field.
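A toy sketch of the monitoring path in the home-hospitalization entry above: a home unit publishes a vital-signs reading to an MQTT broker, from which fog or cloud services could consume it. The broker host, topic layout, and payload schema are assumptions for demonstration.

```python
# Illustrative home-monitoring publisher using paho-mqtt. Names are assumed.
import json
import time

import paho.mqtt.publish as publish

BROKER = "broker.example.org"          # hypothetical fog-layer broker
TOPIC = "home-hospital/room1/vitals"   # hypothetical topic layout

reading = {
    "heart_rate_bpm": 72,    # assumed sensor fields
    "spo2_percent": 97,
    "room_temp_c": 22.5,
    "ts": time.time(),
}
# Publish one reading with at-least-once delivery (QoS 1).
publish.single(TOPIC, json.dumps(reading), hostname=BROKER, qos=1)
```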
Recent fMRI studies have shown a full and detailed retinotopic map in area prostriata, located ventro-dorsally and anterior to the calcarine sulcus along the parieto-occipital sulcus, with a strong preference for peripheral and wide-field stimulation. Here, we report the anatomical pattern of white matter connections between area prostriata and the thalamus encompassing the lateral geniculate nucleus (LGN). To this end, we developed and utilized an automated pipeline comprising a series of Apps that run openly on the cloud computing platform brainlife.io to analyse 139 subjects of the Human Connectome Project (HCP). We observe a continuous and extended bundle of white matter fibers from which two subcomponents can be extracted: one passing ventrally parallel to the optic radiations (OR) and another passing dorsally circumventing the lateral ventricle. Interestingly, the loop travelling dorsally connects the thalamus with the central visual field representation of prostriata located anteriorly, while the other loop travelling more ventrally connects the LGN with the more peripheral visual field representation located posteriorly. We then analyse an additional cohort of 10 HCP subjects using a manual plane extraction method outside brainlife.io to study the relationship between the two extracted white matter subcomponents and eccentricity, myelin and cortical thickness gradients within prostriata. Our results are consistent with a retinotopic segregation recently demonstrated in the OR, connecting the LGN and V1 in humans, and reveal for the first time a retinotopic segregation regarding the trajectory of a fiber bundle between the thalamus and an associative visual area.}, } @article {pmid32525944, year = {2020}, author = {Alnajrani, HM and Norman, AA and Ahmed, BH}, title = {Privacy and data protection in mobile cloud computing: A systematic mapping study.}, journal = {PloS one}, volume = {15}, number = {6}, pages = {e0234312}, pmid = {32525944}, issn = {1932-6203}, mesh = {Cell Phone ; *Cloud Computing ; *Computer Security ; Confidentiality ; Humans ; *Mobile Applications ; *Privacy ; }, abstract = {As a result of a shift in the world of technology, the combination of ubiquitous mobile networks and cloud computing produced the mobile cloud computing (MCC) domain. As major concerns of cloud users, privacy and data protection are receiving substantial attention in the field. Currently, a considerable number of papers have been published on MCC with a growing interest in privacy and data protection. Despite these advances in MCC, however, no specific investigation has highlighted the results of the existing studies on privacy and data protection, nor the trends and open issues in the domain. Accordingly, the objective of this paper is to highlight the results of existing primary studies published on privacy and data protection in MCC to identify current trends and open issues. In this investigation, a systematic mapping study was conducted with a set of six research questions. A total of 1711 studies published from 2009 to 2019 were obtained. Following a filtering process, a collection of 74 primary studies was selected. As a result, the present data privacy threats, attacks, and solutions were identified. Ongoing trends in data privacy practice were also observed. Moreover, the most utilized measures, research type, and contribution type facets were emphasized.
Additionally, the current open research issues in privacy and data protection in MCC were highlighted. Furthermore, the results demonstrate the current state-of-the-art of privacy and data protection in MCC, and the conclusion will help to identify research trends and open issues in MCC for researchers and offer useful information in MCC for practitioners.}, } @article {pmid32511322, year = {2021}, author = {Getz, M and Wang, Y and An, G and Asthana, M and Becker, A and Cockrell, C and Collier, N and Craig, M and Davis, CL and Faeder, JR and Ford Versypt, AN and Mapder, T and Gianlupi, JF and Glazier, JA and Hamis, S and Heiland, R and Hillen, T and Hou, D and Islam, MA and Jenner, AL and Kurtoglu, F and Larkin, CI and Liu, B and Macfarlane, F and Maygrundter, P and Morel, PA and Narayanan, A and Ozik, J and Pienaar, E and Rangamani, P and Saglam, AS and Shoemaker, JE and Smith, AM and Weaver, JJA and Macklin, P}, title = {Iterative community-driven development of a SARS-CoV-2 tissue simulator.}, journal = {bioRxiv : the preprint server for biology}, volume = {}, number = {}, pages = {}, doi = {10.1101/2020.04.02.019075}, pmid = {32511322}, issn = {2692-8205}, support = {P41 GM103712/GM/NIGMS NIH HHS/United States ; R01 AI139088/AI/NIAID NIH HHS/United States ; R35 GM133763/GM/NIGMS NIH HHS/United States ; }, abstract = {The 2019 novel coronavirus, SARS-CoV-2, is a pathogen of critical significance to international public health. Knowledge of the interplay between molecular-scale virus-receptor interactions, single-cell viral replication, intracellular-scale viral transport, and emergent tissue-scale viral propagation is limited. Moreover, little is known about immune system-virus-tissue interactions and how these can result in low-level (asymptomatic) infections in some cases and acute respiratory distress syndrome (ARDS) in others, particularly with respect to presentation in different age groups or pre-existing inflammatory risk factors. Given the nonlinear interactions within and among each of these processes, multiscale simulation models can shed light on the emergent dynamics that lead to divergent outcomes, identify actionable "choke points" for pharmacologic interventions, screen potential therapies, and identify potential biomarkers that differentiate patient outcomes. Given the complexity of the problem and the acute need for an actionable model to guide therapy discovery and optimization, we introduce and iteratively refine a prototype of a multiscale model of SARS-CoV-2 dynamics in lung tissue. The first prototype model was built and shared internationally as open source code and an online interactive model in under 12 hours, and community domain expertise is driving regular refinements. In a sustained community effort, this consortium is integrating data and expertise across virology, immunology, mathematical biology, quantitative systems physiology, cloud and high performance computing, and other domains to accelerate our response to this critical threat to international health. 
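As a worked toy example of the within-host dynamics that multiscale tissue simulators such as the SARS-CoV-2 model above couple to intracellular and immune submodels, here is a standard target-cell-limited ODE sketch in Python; the parameter values are illustrative assumptions, not the consortium's calibration.

```python
# Target-cell-limited viral dynamics: susceptible cells T, infected cells I,
# free virus V. A classic building block, with made-up demo parameters.
import numpy as np
from scipy.integrate import solve_ivp

beta, delta, p, c = 3e-7, 1.0, 10.0, 5.0  # infection, death, production, clearance

def rhs(t, y):
    T, I, V = y
    return [-beta * T * V,
            beta * T * V - delta * I,
            p * I - c * V]

sol = solve_ivp(rhs, (0, 20), [1e6, 0.0, 10.0],  # initial cells and inoculum
                t_eval=np.linspace(0, 20, 201))
peak_day = sol.t[np.argmax(sol.y[2])]
print(f"viral load peaks near day {peak_day:.1f}")
```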
More broadly, this effort is creating a reusable, modular framework for studying viral replication and immune response in tissues, which can also potentially be adapted to related problems in immunology and immunotherapy.}, } @article {pmid32509260, year = {2020}, author = {Khan, F and Khan, MA and Abbas, S and Athar, A and Siddiqui, SY and Khan, AH and Saeed, MA and Hussain, M}, title = {Cloud-Based Breast Cancer Prediction Empowered with Soft Computing Approaches.}, journal = {Journal of healthcare engineering}, volume = {2020}, number = {}, pages = {8017496}, pmid = {32509260}, issn = {2040-2309}, mesh = {Breast/*diagnostic imaging ; Breast Neoplasms/*diagnosis ; *Cloud Computing/statistics & numerical data ; *Diagnosis, Computer-Assisted/statistics & numerical data ; Early Detection of Cancer ; Expert Systems ; Female ; Humans ; Support Vector Machine ; }, abstract = {Developing countries are still striving to improve their health sectors. Breast cancer is the disease most commonly found among women, and past research has shown that if the cancer is detected at a very early stage, the chances of overcoming the disease are higher than when it is detected or treated at a later stage. This article proposes a cloud-based intelligent BCP-T1F-SVM system with two variant models, BCP-T1F and BCP-SVM, employing two main soft computing algorithms. The proposed BCP-T1F-SVM expert system specifically determines the stage and the type of cancer a person is suffering from, indicating to what extent the disease has progressed. The BCP-T1F expert system is employed in the diagnosis of breast cancer at an initial stage and handles the different stages of the cancer, while BCP-SVM gives the higher precision of the two breast cancer detection models. The calculations and evaluation carried out in this research revealed that BCP-T1F achieves 96.56% accuracy, whereas BCP-SVM achieves 97.06%, leading to the conclusion that BCP-SVM is better than BCP-T1F. Expert opinions were provided by the medical staff of Sheikh Zayed Hospital Lahore, Pakistan, and Cavan General Hospital, Lisdaran, Cavan, Ireland.}, } @article {pmid32504192, year = {2021}, author = {Soriano-Valdez, D and Pelaez-Ballestas, I and Manrique de Lara, A and Gastelum-Strozzi, A}, title = {The basics of data, big data, and machine learning in clinical practice.}, journal = {Clinical rheumatology}, volume = {40}, number = {1}, pages = {11-23}, pmid = {32504192}, issn = {1434-9949}, mesh = {*Big Data ; Delivery of Health Care ; Humans ; Machine Learning ; *Medical Informatics ; Software ; }, abstract = {Health informatics and biomedical computing have introduced the use of computer methods to analyze clinical information and provide tools to assist clinicians during the diagnosis and treatment of diverse clinical conditions. With the amount of information that can be obtained in the healthcare setting, new methods to acquire, organize, and analyze the data are being developed each day, including new applications in the world of big data and machine learning.
In this review, first we present the most basic concepts in data science, including the structural hierarchy of information and how it is managed. A section is dedicated to discussing topics relevant to the acquisition of data, importantly the availability and use of online resources such as survey software and cloud computing services. Along with digital datasets, these tools make it possible to create more diverse models and facilitate collaboration. Afterwards, we describe concepts and techniques in machine learning used to process and analyze health data, especially those most widely applied in rheumatology. Overall, the objective of this review is to aid in the comprehension of how data science is used in health, with a special emphasis on the relevance to the field of rheumatology. It provides clinicians with basic tools on how to approach and understand new trends in health informatics analysis currently being used in rheumatology practice. If clinicians understand the potential use and limitations of health informatics, this will facilitate interdisciplinary conversations and continued projects relating to data, big data, and machine learning.}, } @article {pmid32500999, year = {2020}, author = {Zhang, C and Liu, L and Zhou, L and Yin, X and Wei, X and Hu, Y and Liu, Y and Chen, S and Wang, J and Wang, ZL}, title = {Self-Powered Sensor for Quantifying Ocean Surface Water Waves Based on Triboelectric Nanogenerator.}, journal = {ACS nano}, volume = {14}, number = {6}, pages = {7092-7100}, doi = {10.1021/acsnano.0c01827}, pmid = {32500999}, issn = {1936-086X}, abstract = {Ocean waves contain various kinds of marine information, but it is generally difficult to obtain the high-precision quantification needed for ocean development and utilization. Here, we report a self-powered and high-performance triboelectric ocean-wave spectrum sensor (TOSS) fabricated using a tubular triboelectric nanogenerator (TENG) and hollow ball buoy, which not only can adapt to the measurement of ocean surface water waves in any direction but also can eliminate the influence of seawater on the performance of the sensor. Based on the high-sensitivity advantage of TENG, an ultrahigh sensitivity of 2530 mV mm[-1] (which is 100 times higher than that of previous work) and a minimal monitoring error of 0.1% are achieved in monitoring wave height and wave period, respectively. Importantly, six basic ocean-wave parameters (wave height, wave period, wave frequency, wave velocity, wavelength, and wave steepness), wave velocity spectrum, and mechanical energy spectrum have been derived from the electrical signals of TOSS. Our findings not only provide ocean-wave parameters but also offer significant and accurate data support for cloud computing of ocean big data.}, } @article {pmid32498594, year = {2020}, author = {Wang, L and Alexander, CA}, title = {Big data analytics in medical engineering and healthcare: methods, advances and challenges.}, journal = {Journal of medical engineering & technology}, volume = {44}, number = {6}, pages = {267-283}, doi = {10.1080/03091902.2020.1769758}, pmid = {32498594}, issn = {1464-522X}, mesh = {*Big Data ; *Biomedical Engineering ; *Delivery of Health Care ; Humans ; }, abstract = {Big data analytics are gaining popularity in medical engineering and healthcare use cases. Stakeholders are finding big data analytics reduce medical costs and personalise medical services for each individual patient.
Big data analytics can be used in large-scale genetics studies, public health, personalised and precision medicine, new drug development, etc. The introduction of the types, sources, and features of big data in healthcare as well as the applications and benefits of big data and big data analytics in healthcare is key to understanding healthcare big data and will be discussed in this article. Major methods, platforms and tools of big data analytics in medical engineering and healthcare are also presented. Advances and technological progress in healthcare big data analytics are introduced, which include artificial intelligence (AI) with big data, infrastructure and cloud computing, advanced computation and data processing, privacy and cybersecurity, health economic outcomes and technology management, and smart healthcare with sensing, wearable devices and the Internet of Things (IoT). Current challenges of dealing with big data and big data analytics in medical engineering and healthcare as well as future work are also presented.}, } @article {pmid32490091, year = {2020}, author = {Corbane, C and Politis, P and Kempeneers, P and Simonetti, D and Soille, P and Burger, A and Pesaresi, M and Sabo, F and Syrris, V and Kemper, T}, title = {A global cloud-free pixel-based image composite from Sentinel-2 data.}, journal = {Data in brief}, volume = {31}, number = {}, pages = {105737}, pmid = {32490091}, issn = {2352-3409}, abstract = {Large-scale land cover classification from satellite imagery is still a challenge due to the big volume of data to be processed, to persistent cloud-cover in cloud-prone areas as well as seasonal artefacts that affect spatial homogeneity. Sentinel-2 time series from the Copernicus Earth Observation program offer great potential for fine-scale land cover mapping thanks to high spatial and temporal resolutions, with a decametric resolution and five-day repeat time. However, the selection of best available scenes, their download together with the requirements in terms of storage and computing resources pose restrictions for large-scale land cover mapping. The dataset presented in this paper corresponds to a global cloud-free pixel-based composite created from the Sentinel-2 data archive (Level L1C) available in Google Earth Engine for the period January 2017-December 2018. The methodology used for generating the image composite is described and the metadata associated with the 10 m resolution dataset is presented. The data, with a total volume of 15 TB, is stored on the Big Data platform of the Joint Research Centre.
It can be downloaded per UTM grid zone, loaded into GIS clients and displayed easily thanks to pre-computed overviews (a minimal Earth Engine compositing sketch appears after this block).}, } @article {pmid32486383, year = {2020}, author = {Abd-El-Atty, B and Iliyasu, AM and Alaskar, H and Abd El-Latif, AA}, title = {A Robust Quasi-Quantum Walks-Based Steganography Protocol for Secure Transmission of Images on Cloud-Based E-healthcare Platforms.}, journal = {Sensors (Basel, Switzerland)}, volume = {20}, number = {11}, pages = {}, pmid = {32486383}, issn = {1424-8220}, support = {Advanced Computational Intelligence & Intelligent Systems Engineering (ACIISE) Research Group Project Number 2019/01/9862//Prince Sattam bin Abdulaziz University/ ; }, mesh = {*Algorithms ; *Cloud Computing ; *Computer Security ; *Image Processing, Computer-Assisted ; Quantum Theory ; *Telemedicine ; }, abstract = {Traditionally, tamper-proof steganography involves using efficient protocols to encrypt the stego cover image and/or hidden message prior to embedding it into the carrier object. However, as the inevitable transition to the quantum computing paradigm beckons, its immense computing power will be exploited to violate even the best non-quantum, i.e., classical, stego protocol. On its part, quantum walks can be tailored to utilise their astounding 'quantumness' to propagate nonlinear chaotic behaviours as well as their sensitivity to alterations in primary key parameters, both important properties for efficient information security. Our study explores using a classical (i.e., quantum-inspired) rendition of the controlled alternate quantum walks (i.e., CAQWs) model to fabricate a robust image steganography protocol for cloud-based E-healthcare platforms by locating content that overlays the secret (or hidden) bits. The design employed in our technique precludes the need for pre- and/or post-encryption of the carrier and secret images. Furthermore, our design simplifies the process of extracting the confidential (hidden) information since only the stego image and primary states to run the CAQWs are required. We validate our proposed protocol on a dataset of medical images, which exhibited remarkable outcomes in terms of their security, good visual quality, high resistance to data loss attacks, high embedding capacity, etc., making the proposed scheme a veritable strategy for efficient medical image steganography.}, } @article {pmid32485943, year = {2020}, author = {Silva, FSD and Silva, E and Neto, EP and Lemos, M and Neto, AJV and Esposito, F}, title = {A Taxonomy of DDoS Attack Mitigation Approaches Featured by SDN Technologies in IoT Scenarios.}, journal = {Sensors (Basel, Switzerland)}, volume = {20}, number = {11}, pages = {}, pmid = {32485943}, issn = {1424-8220}, abstract = {The Internet of Things (IoT) has attracted much attention from the Information and Communication Technology (ICT) community in recent years. One of the main reasons for this is the availability of techniques provided by this paradigm, such as environmental monitoring employing user data and everyday objects. The facilities provided by the IoT infrastructure allow the development of a wide range of new business models and applications (e.g., smart homes, smart cities, or e-health). However, there are still concerns over the security measures which need to be addressed to ensure a suitable deployment. Distributed Denial of Service (DDoS) attacks are among the most severe virtual threats at present and occur prominently in this scenario, which can mainly be attributed to their ease of execution.
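To illustrate the cloud-free compositing described in the Sentinel-2 entry above, here is a minimal Google Earth Engine sketch using the Python earthengine-api; the QA60 cloud-mask bits are standard for Sentinel-2 L1C, but the date window and overall recipe are a simplified stand-in for the paper's methodology.

```python
# Sketch: cloud-masked median composite from Sentinel-2 L1C in Earth Engine.
# Requires an authenticated earthengine-api session (ee.Authenticate()).
import ee

ee.Initialize()

def mask_s2_clouds(image):
    qa = image.select("QA60")
    # Bits 10 and 11 of QA60 flag opaque clouds and cirrus, respectively.
    cloud = qa.bitwiseAnd(1 << 10).eq(0)
    cirrus = qa.bitwiseAnd(1 << 11).eq(0)
    return image.updateMask(cloud.And(cirrus))

composite = (
    ee.ImageCollection("COPERNICUS/S2")        # Level-1C archive
    .filterDate("2017-01-01", "2018-12-31")    # period used in the paper
    .map(mask_s2_clouds)
    .median()                                  # per-pixel cloud-free composite
)
print(composite.bandNames().getInfo())
```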
In light of this, several research studies have been conducted to find new strategies as well as improve existing techniques and solutions. The use of emerging technologies such as those based on the Software-Defined Networking (SDN) paradigm has proved to be a promising alternative as a means of mitigating DDoS attacks. However, the high granularity that characterizes the IoT scenarios and the wide range of techniques explored during the DDoS attacks make the task of finding and implementing new solutions quite challenging. This problem is exacerbated by the lack of benchmarks that can assist developers when designing new solutions for mitigating DDoS attacks for increasingly complex IoT scenarios. To fill this knowledge gap, in this study we carry out an in-depth investigation of the state-of-the-art and create a taxonomy that describes and characterizes existing solutions and highlights their main limitations. Our taxonomy provides a comprehensive view of the reasons for the deployment of the solutions, and the scenario in which they operate. The results of this study demonstrate the main benefits and drawbacks of each solution set when applied to specific scenarios by examining current trends and future perspectives, for example, the adoption of emerging technologies based on Cloud and Edge (or Fog) Computing.}, } @article {pmid32479601, year = {2020}, author = {Mohanraj, S and Díaz-Mejía, JJ and Pham, MD and Elrick, H and Husić, M and Rashid, S and Luo, P and Bal, P and Lu, K and Patel, S and Mahalanabis, A and Naidas, A and Christensen, E and Croucher, D and Richards, LM and Shooshtari, P and Brudno, M and Ramani, AK and Pugh, TJ}, title = {CReSCENT: CanceR Single Cell ExpressioN Toolkit.}, journal = {Nucleic acids research}, volume = {48}, number = {W1}, pages = {W372-W379}, pmid = {32479601}, issn = {1362-4962}, mesh = {Humans ; Neoplasms/*genetics/immunology ; RNA-Seq/*methods ; Single-Cell Analysis/*methods ; *Software ; T-Lymphocytes/metabolism ; }, abstract = {CReSCENT: CanceR Single Cell ExpressioN Toolkit (https://crescent.cloud), is an intuitive and scalable web portal incorporating a containerized pipeline execution engine for standardized analysis of single-cell RNA sequencing (scRNA-seq) data. While scRNA-seq data for tumour specimens are readily generated, subsequent analysis requires high-performance computing infrastructure and user expertise to build analysis pipelines and tailor interpretation for cancer biology. CReSCENT uses public data sets and preconfigured pipelines that are accessible to computational biology non-experts and are user-editable to allow optimization, comparison, and reanalysis for specific experiments. Users can also upload their own scRNA-seq data for analysis and results can be kept private or shared with other users.}, } @article {pmid32479411, year = {2020}, author = {Ye, Q and Zhou, J and Wu, H}, title = {Using Information Technology to Manage the COVID-19 Pandemic: Development of a Technical Framework Based on Practical Experience in China.}, journal = {JMIR medical informatics}, volume = {8}, number = {6}, pages = {e19515}, pmid = {32479411}, issn = {2291-9694}, abstract = {BACKGROUND: The coronavirus disease (COVID-19) epidemic poses an enormous challenge to the global health system, and governments have taken active preventive and control measures. 
The health informatics community in China has actively taken action to leverage health information technologies for epidemic monitoring, detection, early warning, prevention and control, and other tasks.

OBJECTIVE: The aim of this study was to develop a technical framework to respond to the COVID-19 epidemic from a health informatics perspective.

METHODS: In this study, we collected information on health information technologies to understand the actions taken by the health informatics community in China during the COVID-19 outbreak, and we developed a health information technology framework for epidemic response based on these measures and methods.

RESULTS: Based on the framework, we review specific health information technology practices for managing the outbreak in China, describe the highlights of their application in detail, and discuss critical issues to consider when using health information technology. Technologies employed include mobile and web-based services such as Internet hospitals and WeChat, big data analyses (including digital contact tracing through QR codes and epidemic prediction), cloud computing, the Internet of Things, artificial intelligence (including the use of drones, robots, and intelligent diagnosis), 5G telemedicine, and clinical information systems to facilitate clinical management for COVID-19.
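As a small illustration of the QR-code-based contact tracing mentioned in these results, the sketch below generates a venue check-in code with the Python qrcode package; the payload format and venue identifier are invented for the example, and real systems would sign or tokenize the data.

```python
# Generate a check-in QR code encoding a venue identifier and timestamp.
# Payload schema is hypothetical; scanned codes would be logged server-side.
import json
import time

import qrcode

payload = json.dumps({"venue": "clinic-entrance-3", "ts": int(time.time())})
img = qrcode.make(payload)   # returns a PIL image
img.save("checkin.png")
```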

CONCLUSIONS: Practical experience in China shows that health information technologies play a pivotal role in responding to the COVID-19 epidemic.}, } @article {pmid32479340, year = {2020}, author = {Li, NS and Chen, YT and Hsu, YP and Pang, HH and Huang, CY and Shiue, YL and Wei, KC and Yang, HW}, title = {Mobile healthcare system based on the combination of a lateral flow pad and smartphone for rapid detection of uric acid in whole blood.}, journal = {Biosensors & bioelectronics}, volume = {164}, number = {}, pages = {112309}, doi = {10.1016/j.bios.2020.112309}, pmid = {32479340}, issn = {1873-4235}, mesh = {*Biosensing Techniques ; *Delivery of Health Care ; *Gout/diagnosis ; Humans ; *Smartphone ; Uric Acid/blood ; }, abstract = {Excessive production of uric acid (UA) in blood may lead to gout, hyperuricaemia and kidney disorder; thus, a fast, simple and reliable biosensor is needed to routinely determine the UA concentration in blood without pretreatment. The purpose of this study was to develop a mobile healthcare (mHealth) system using a drop of blood, which comprised a lateral flow pad (LFP), mesoporous Prussian blue nanoparticles (MPBs) as artificial nanozymes and auto-calculation software for on-site determination of UA in blood and data management. A standard curve was found to be linear in the range of 1.5-8.5 mg/dL UA, and convenience, cloud computing and personal information management were simultaneously achieved for the proposed mHealth system. Our mHealth system appropriately met the requirements of application in patients' homes, with the potential of real-time monitoring by their primary care physicians (PCPs).}, } @article {pmid32477655, year = {2020}, author = {Lee, V and Parekh, K and Matthew, G and Shi, Q and Pelletier, K and Canale, A and Luzuriaga, K and Mathew, J}, title = {JITA: A Platform for Enabling Real Time Point-of-Care Patient Recruitment.}, journal = {AMIA Joint Summits on Translational Science proceedings. AMIA Joint Summits on Translational Science}, volume = {2020}, number = {}, pages = {355-359}, pmid = {32477655}, issn = {2153-4063}, support = {UL1 TR000161/TR/NCATS NIH HHS/United States ; }, abstract = {Timely accrual continues to be a challenge in clinical trials. The evolution of Electronic Health Record systems and cohort selection tools like i2b2 have improved identification of potential candidate participants. However, delays in receiving relevant patient information and lack of real time patient identification cause difficulty in meeting recruitment targets. The authors have designed and developed a proof of concept platform that informs authorized study team members about potential participant matches while the patient is at a healthcare setting. This Just-In-Time Alert (JITA) application leverages Health Level 7 (HL7) messages and parses them against study eligibility criteria using Amazon Web Services (AWS) cloud technologies. When required conditions are satisfied, the rules engine triggers an alert to the study team. 
Our pilot tests using difficult-to-recruit trials currently underway at the UMass Medical School have shown significant potential, generating more than 90 patient alerts in a 90-day testing timeframe (an illustrative HL7-parsing sketch follows below).}, } @article {pmid32473441, year = {2020}, author = {Liu, A and Wu, Q and Cheng, X}, title = {Using the Google Earth Engine to estimate a 10 m resolution monthly inventory of soil fugitive dust emissions in Beijing, China.}, journal = {The Science of the total environment}, volume = {735}, number = {}, pages = {139174}, doi = {10.1016/j.scitotenv.2020.139174}, pmid = {32473441}, issn = {1879-1026}, abstract = {Soil fugitive dust (SFD) is an important contributor to ambient particulate matter (PM), but most current SFD emission inventories are updated slowly or have low resolution. In areas where vegetation coverage and climatic conditions undergo significant seasonal changes, the classic wind erosion equation (WEQ) tends to underestimate SFD emissions, increasing the need for higher spatiotemporal data resolution. Continuous acquisition of precise bare soil maps is the key barrier to compiling monthly high-resolution SFD emission inventories. In this study, we proposed taking advantage of the massive Landsat and Sentinel-2 imagery data sets stored in the Google Earth Engine (GEE) cloud platform to enable the rapid production of bare soil maps with spatial resolutions of up to 10 m. The resulting improved spatiotemporal resolution of wind erosion parameters allowed us to estimate SFD emissions in Beijing as being ~5-7 times the level calculated by the WEQ. Spring and winter accounted for >85% of SFD emissions, while April was the dustiest month with SFD emissions of PM10 exceeding 11,000 t. Our results highlighted the role of SFD in air pollution during winter and spring in northern China, and suggested that GEE should be further used for image acquisition, data processing, and compilation of gridded SFD inventories. These inventories can help identify the location and intensity of SFD sources while providing supporting information for local authorities working to develop targeted mitigation measures.}, } @article {pmid32467813, year = {2020}, author = {Massaad, E and Cherfan, P}, title = {Social Media Data Analytics on Telehealth During the COVID-19 Pandemic.}, journal = {Cureus}, volume = {12}, number = {4}, pages = {e7838}, pmid = {32467813}, issn = {2168-8184}, abstract = {INTRODUCTION: Physical distancing during the coronavirus Covid-19 pandemic has brought telehealth to the forefront to keep up with patient care amidst an international crisis that is exhausting healthcare resources. Understanding and managing health-related concerns resulting from physical distancing measures are of utmost importance.
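To make the HL7-message screening in the JITA entry above concrete, here is a sketch using the python-hl7 library; the sample ADT message and the toy eligibility rule are fabricated for illustration, not taken from the JITA platform.

```python
# Parse an HL7 v2 message and apply a toy eligibility rule, in the spirit of
# point-of-care recruitment alerts. Message content is fabricated.
import hl7

# HL7 v2 segments are separated by carriage returns.
raw = "\r".join([
    "MSH|^~\\&|ADT1|HOSP|JITA|TRIAL|202001011200||ADT^A01|MSG0001|P|2.3",
    "PID|1||123456||DOE^JANE||19850301|F",
    "DG1|1||E11.9^Type 2 diabetes^I10",
])

msg = hl7.parse(raw)
patient_id = str(msg.segment("PID")[3])
diagnosis = str(msg.segment("DG1")[3])

# Hypothetical eligibility criterion: alert on a diabetes diagnosis code.
if "E11" in diagnosis:
    print(f"ALERT: patient {patient_id} may match study criteria ({diagnosis})")
```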

OBJECTIVES: To describe and analyze the volume, content, and geospatial distribution of tweets associated with telehealth during the Covid-19 pandemic.

METHODS: We queried public Twitter data to retrieve tweets related to telehealth posted from March 30, 2020 to April 6, 2020. We analyzed tweets using natural language processing (NLP) and unsupervised learning methods. Clustering analysis was performed to classify tweets. Geographic tweet distribution was correlated with Covid-19 confirmed cases in the United States. All analyses were carried out on the Google Cloud computing service "Google Colab" using Python libraries (Python Software Foundation).
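A minimal sketch of this kind of NLP clustering step with scikit-learn; the example tweets and the number of clusters are placeholders rather than the study's actual corpus or settings.

```python
# Cluster short texts with TF-IDF features and k-means, echoing the
# unsupervised analysis described above. Corpus and k are illustrative.
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.cluster import KMeans

tweets = [
    "telehealth visit with my doctor today",
    "mental health support now available via telehealth",
    "medicare expands telehealth coverage during pandemic",
    "virtual care keeps patients home and safe",
]

X = TfidfVectorizer(stop_words="english").fit_transform(tweets)
labels = KMeans(n_clusters=2, n_init=10, random_state=0).fit_predict(X)

for tweet, label in zip(tweets, labels):
    print(label, tweet)
```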

RESULTS: A total of 41,329 tweets containing the term "telehealth" were retrieved. The most common terms appearing alongside 'telehealth' were "covid", "health", "care", "services", "patients", and "pandemic". Mental health was the most common health-related topic that appeared in our search, reflecting a high need for mental healthcare during the pandemic. Similarly, Medicare was the most commonly appearing health plan, mirroring the accelerated access to telehealth and changes in coverage policies. The geographic distribution of tweets related to telehealth and having a specific location within the United States (n=19,367) was significantly associated with the number of confirmed Covid-19 cases reported in each state (p<0.001).

CONCLUSION: Social media activity is an accurate reflection of disease burden during the Covid-19 pandemic. Widespread adoption of telehealth-favoring policies is necessary and mostly needed to address mental health problems that may arise in areas of high infection and death rates.}, } @article {pmid32466770, year = {2020}, author = {Tian, L and Li, Y and Edmonson, MN and Zhou, X and Newman, S and McLeod, C and Thrasher, A and Liu, Y and Tang, B and Rusch, MC and Easton, J and Ma, J and Davis, E and Trull, A and Michael, JR and Szlachta, K and Mullighan, C and Baker, SJ and Downing, JR and Ellison, DW and Zhang, J}, title = {CICERO: a versatile method for detecting complex and diverse driver fusions using cancer RNA sequencing data.}, journal = {Genome biology}, volume = {21}, number = {1}, pages = {126}, pmid = {32466770}, issn = {1474-760X}, support = {T32 CA009683/CA/NCI NIH HHS/United States ; P30 CA021765/CA/NCI NIH HHS/United States ; P01 CA096832/CA/NCI NIH HHS/United States ; }, mesh = {Algorithms ; *Gene Fusion ; Humans ; Molecular Sequence Annotation/*methods ; Neoplasms/*genetics ; Sequence Analysis, RNA ; *Software ; }, abstract = {To discover driver fusions beyond canonical exon-to-exon chimeric transcripts, we develop CICERO, a local assembly-based algorithm that integrates RNA-seq read support with extensive annotation for candidate ranking. CICERO outperforms commonly used methods, achieving a 95% detection rate for 184 independently validated driver fusions including internal tandem duplications and other non-canonical events in 170 pediatric cancer transcriptomes. Re-analysis of TCGA glioblastoma RNA-seq unveils previously unreported kinase fusions (KLHL7-BRAF) and a 13% prevalence of EGFR C-terminal truncation. Accessible via standard or cloud-based implementation, CICERO enhances driver fusion detection for research and precision oncology. The CICERO source code is available at https://github.com/stjude/Cicero.}, } @article {pmid32462884, year = {2020}, author = {Zarowitz, BJ}, title = {Emerging Pharmacotherapy and Health Care Needs of Patients in the Age of Artificial Intelligence and Digitalization.}, journal = {The Annals of pharmacotherapy}, volume = {54}, number = {10}, pages = {1038-1046}, doi = {10.1177/1060028020919383}, pmid = {32462884}, issn = {1542-6270}, mesh = {Aged ; Aged, 80 and over ; *Artificial Intelligence ; Decision Support Systems, Clinical ; Delivery of Health Care/*methods ; *Digital Technology ; Drug Therapy/*methods ; Female ; Humans ; Male ; Telemedicine/*methods ; }, abstract = {Advances in the application of artificial intelligence, digitization, technology, iCloud computing, and wearable devices in health care predict an exciting future for health care professionals and our patients. Projections suggest an older, generally healthier, better-informed but financially less secure patient population of wider cultural and ethnic diversity that live throughout the United States. A pragmatic yet structured approach is recommended to prepare health care professionals and patients for emerging pharmacotherapy needs. Clinician training should include genomics, cloud computing, use of large data sets, implementation science, and cultural competence. 
Patients will need support for wearable devices and reassurance regarding digital medicine.}, } @article {pmid32459811, year = {2020}, author = {Cheng, C and Zhou, H and Chai, X and Li, Y and Wang, D and Ji, Y and Niu, S and Hou, Y}, title = {Adoption of image surface parameters under moving edge computing in the construction of mountain fire warning method.}, journal = {PloS one}, volume = {15}, number = {5}, pages = {e0232433}, pmid = {32459811}, issn = {1932-6203}, mesh = {*Algorithms ; China ; Cloud Computing ; Computer Systems ; Conservation of Natural Resources/methods/statistics & numerical data ; Discriminant Analysis ; Geological Phenomena ; Humans ; Image Processing, Computer-Assisted/*methods/statistics & numerical data ; Software ; Surface Properties ; Wildfires/*prevention & control/statistics & numerical data ; }, abstract = {In order to cope with the high frequency and multiple causes of mountain fires, it is very important to adopt appropriate technologies to monitor and provide early warning of mountain fires through a few surface parameters. At the same time, existing mobile terminal equipment has insufficient image processing and storage capacity, and its energy consumption during data transmission is high, which calls for computation offloading. To address this, first, a hierarchical discriminant analysis algorithm based on image feature extraction is introduced, and image acquisition software for the mobile edge computing environment is designed and installed on the Android system. Based on the remote sensing data, the land surface parameters of mountain fire are obtained, and an image recognition optimization algorithm is applied in the mobile edge computing (MEC) environment to solve the transmission delay problem of traditional mobile cloud computing (MCC). Then, according to the forest fire sensitivity index, a forest fire early warning model based on MEC is designed. Finally, the image recognition response time and bandwidth consumption of the algorithm are studied, and the occurrence probability of mountain fire in Muli county, Liangshan prefecture, Sichuan is predicted. The results show that, compared with the MCC architecture, the algorithm presented in this study has a shorter recognition and response time for different images in a WiFi network environment; compared with MCC, the MEC architecture serves nearby users and transmits less data, which can effectively reduce the bandwidth pressure on the network. In most areas of Muli county, Liangshan prefecture, the probability of mountain fire is relatively low; the probability of mountain fire caused by the non-surface environment is about 8 times that of the surface environment, and the influence of the non-surface environment during periods of high fire incidence is lower than during periods of low incidence. In conclusion, surface parameters under MEC can be used to effectively predict mountain fires and to provide timely preventive measures.}, } @article {pmid32457555, year = {2019}, author = {Hylton, A and Henselman-Petrusek, G and Sang, J and Short, R}, title = {Tuning the Performance of a Computational Persistent Homology Package.}, journal = {Software: practice & experience}, volume = {49}, number = {5}, pages = {885-905}, pmid = {32457555}, issn = {0038-0644}, support = {ARMD_629660/ImNASA/Intramural NASA/United States ; }, abstract = {In recent years, persistent homology has become an attractive method for data analysis.
It captures topological features, such as connected components, holes, and voids from point cloud data and summarizes the way in which these features appear and disappear in a filtration sequence. In this project, we focus on improving the performance of Eirene, a computational package for persistent homology. Eirene is a 5000-line open-source software library implemented in the dynamic programming language Julia. We use the Julia profiling tools to identify performance bottlenecks and develop novel methods to manage them, including the parallelization of some time-consuming functions on multicore/manycore hardware. Empirical results show that performance can be greatly improved (a Python analogue is sketched below).}, } @article {pmid32455635, year = {2020}, author = {Kim, M and Yu, S and Lee, J and Park, Y and Park, Y}, title = {Design of Secure Protocol for Cloud-Assisted Electronic Health Record System Using Blockchain.}, journal = {Sensors (Basel, Switzerland)}, volume = {20}, number = {10}, pages = {}, pmid = {32455635}, issn = {1424-8220}, mesh = {*Blockchain ; *Cloud Computing ; Computer Security ; Computer Systems ; Confidentiality ; *Electronic Health Records ; Humans ; Technology ; }, abstract = {In the traditional electronic health record (EHR) management system, each medical service center manages its own health records, which are difficult to share across different medical platforms. Recently, blockchain technology has emerged as a popular alternative that enables medical service centers based on different platforms to share EHRs. However, it is hard to store whole EHR data in a blockchain because of its size and cost. To resolve this problem, cloud computing is considered a promising solution. Cloud computing offers advantageous properties such as storage availability and scalability. Unfortunately, an EHR system with cloud computing can be vulnerable to various attacks because the sensitive data are sent over a public channel. We propose a secure protocol for a cloud-assisted EHR system using blockchain. In the proposed scheme, blockchain technology is used to provide data integrity and access control using log transactions, and the cloud server stores and manages the patient's EHRs to provide secure storage resources. We use an elliptic curve cryptosystem (ECC) to provide secure health data sharing with cloud computing. We demonstrate that the proposed EHR system can prevent various attacks by using informal security analysis and Automated Validation of Internet Security Protocols and Applications (AVISPA) simulation. Furthermore, we prove that the proposed EHR system provides secure mutual authentication using BAN logic analysis. We then compare the computation overhead, communication overhead, and security properties with existing schemes.
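Eirene itself is a Julia package; to keep this document's examples in Python, here is an equivalent persistent homology computation with the ripser package on a synthetic point cloud (a noisy circle, so one long-lived H1 loop feature is expected).

```python
# Persistence diagrams of a noisy circle: one prominent H1 (loop) feature
# should persist, illustrating what packages like Eirene compute.
import numpy as np
from ripser import ripser

rng = np.random.default_rng(0)
theta = rng.uniform(0, 2 * np.pi, 200)
points = np.c_[np.cos(theta), np.sin(theta)] + rng.normal(0, 0.05, (200, 2))

dgms = ripser(points)["dgms"]      # [H0 diagram, H1 diagram]
h1 = dgms[1]
lifetimes = h1[:, 1] - h1[:, 0]
print("most persistent H1 feature lives for", round(lifetimes.max(), 3))
```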
Consequently, the proposed EHR system is suitable for practical healthcare systems in terms of security and efficiency (a generic ECDH sketch follows this block).}, } @article {pmid32442274, year = {2020}, author = {Xu, Y and Yang-Turner, F and Volk, D and Crook, D}, title = {NanoSPC: a scalable, portable, cloud compatible viral nanopore metagenomic data processing pipeline.}, journal = {Nucleic acids research}, volume = {48}, number = {W1}, pages = {W366-W371}, pmid = {32442274}, issn = {1362-4962}, support = {/DH_/Department of Health/United Kingdom ; }, mesh = {Bacteria/genetics/isolation & purification ; Cloud Computing ; *Genome, Viral ; Metagenomics/*methods ; Nanopore Sequencing/*methods ; *Software ; Viruses/*genetics/isolation & purification ; }, abstract = {Metagenomic sequencing combined with Oxford Nanopore Technology has the potential to become a point-of-care test for infectious disease in public health and clinical settings, providing rapid diagnosis of infection, guiding individual patient management and treatment strategies, and informing infection prevention and control practices. However, publicly available, streamlined, and reproducible pipelines for analyzing Nanopore metagenomic sequencing data are still lacking. Here we introduce NanoSPC, a scalable, portable and cloud compatible pipeline for analyzing Nanopore sequencing data. NanoSPC can identify potentially pathogenic viruses and bacteria simultaneously to provide comprehensive characterization of individual samples. The pipeline can also detect single nucleotide variants and assemble high quality complete consensus genome sequences, permitting high-resolution inference of transmission. We implement NanoSPC using Nextflow manager within Docker images to allow reproducibility and portability of the analysis. Moreover, we deploy NanoSPC to our scalable pathogen pipeline platform, enabling elastic computing for high throughput Nanopore data on HPC cluster as well as multiple cloud platforms, such as Google Cloud, Amazon Elastic Computing Cloud, Microsoft Azure and OpenStack. Users can either access our web interface (https://nanospc.mmmoxford.uk) to run cloud-based analyses, monitor progress, and visualize results, or download the Docker images and run the command line to analyse data locally.}, } @article {pmid32406827, year = {2020}, author = {Maeser, R}, title = {Analyzing CSP Trustworthiness and Predicting Cloud Service Performance.}, journal = {IEEE computer graphics and applications}, volume = {}, number = {}, pages = {}, doi = {10.1109/OJCS.2020.2994095}, pmid = {32406827}, issn = {1558-1756}, abstract = {Analytics firm Cyence estimated Amazon's four-hour cloud computing outage in 2017 "cost S&P 500 companies at least $150 million" and traffic monitoring firm Apica claimed "54 of the top 100 online retailers saw site performance slump by at least 20 percent". According to Ponemon, 2015 data center outages cost Fortune 1000 companies between $1.25 and $2.5 billion. Despite potential risks, the cloud computing industry continues to grow. For example, the Internet of Things, which is projected to grow 266% between 2013 and 2020, will drive increased demand on cloud computing as data across multiple industries is collected and sent back to cloud data centers for processing. RightScale estimates enterprises will continue to increase cloud demand, with 85% having multi-cloud strategies. This growth and dependency will influence risk exposure and potential for impact (e.g. availability, performance, security, financial).
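The ECC primitive underpinning mutual authentication schemes like the EHR protocol above can be sketched with the Python cryptography package: two parties derive the same session key via ECDH. This is a generic illustration of the building block, not the authors' protocol.

```python
# ECDH key agreement on P-256 followed by HKDF key derivation: both sides
# obtain the same symmetric session key. Generic sketch, not the paper's scheme.
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.asymmetric import ec
from cryptography.hazmat.primitives.kdf.hkdf import HKDF

def derive(shared: bytes) -> bytes:
    return HKDF(algorithm=hashes.SHA256(), length=32, salt=None,
                info=b"ehr-session").derive(shared)

patient_key = ec.generate_private_key(ec.SECP256R1())
server_key = ec.generate_private_key(ec.SECP256R1())

k1 = derive(patient_key.exchange(ec.ECDH(), server_key.public_key()))
k2 = derive(server_key.exchange(ec.ECDH(), patient_key.public_key()))
assert k1 == k2  # both ends now share a 256-bit session key
```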
The research presented in this paper calculates cloud service provider (CSP) trustworthiness levels and predicts cloud service and cloud service level agreement (SLA) availability performance. Evolving industry standards (e.g., NIST, ISO/IEC) for cloud SLAs and existing work on CSP trustworthiness are leveraged as regression-based predictive models are constructed to analyze CSP cloud computing services, SLA performance and CSP trustworthiness.}, } @article {pmid32406416, year = {2020}, author = {Greco, L and Percannella, G and Ritrovato, P and Tortorella, F and Vento, M}, title = {Trends in IoT based solutions for health care: Moving AI to the edge.}, journal = {Pattern recognition letters}, volume = {135}, number = {}, pages = {346-353}, pmid = {32406416}, issn = {0167-8655}, abstract = {In recent times, we are witnessing an ever-growing diffusion of smart medical sensors and Internet of Things devices that are heavily changing the way healthcare is approached worldwide. In this context, a combination of Cloud and IoT architectures is often exploited to make smart healthcare systems capable of supporting near-realtime applications when processing and performing Artificial Intelligence on the huge amount of data produced by wearable sensor networks. However, the response time and the availability of cloud-based systems, together with security and privacy, still represent critical issues that prevent Internet of Medical Things (IoMT) devices and architectures from being a reliable and effective solution to this aim. Lately, there has been growing interest in architectures and approaches that exploit Edge and Fog computing to compensate for the weaknesses of the cloud. In this paper, we present a short review of the general use of IoT solutions in health care, starting from early health monitoring solutions based on wearable sensors up to a discussion of the latest trends in fog/edge computing for smart health.}, } @article {pmid32400988, year = {2020}, author = {Chen, S and Huang, J and Gao, Z}, title = {[Development of Hospital Medical Instrumentation Management System Based on Cloud Computing].}, journal = {Zhongguo yi liao qi xie za zhi = Chinese journal of medical instrumentation}, volume = {44}, number = {2}, pages = {141-144}, doi = {10.3969/j.issn.1671-7104.2020.02.010}, pmid = {32400988}, issn = {1671-7104}, mesh = {*Cloud Computing ; *Durable Medical Equipment ; *Materials Management, Hospital ; Software ; }, abstract = {To improve the efficiency of medical instrumentation management in hospitals, reduce management costs and save labor costs, this study analyzes the problems of the traditional hospital medical instrumentation management system and develops a new system based on cloud computing.
Through the characteristics of the SaaS service platform, the system improves the flow efficiency of medical instrumentation in the hospital, saves deployment and operating costs, and improves staff work efficiency.}, } @article {pmid32399163, year = {2020}, author = {Yu, J and Li, H and Liu, D}, title = {Modified Immune Evolutionary Algorithm for Medical Data Clustering and Feature Extraction under Cloud Computing Environment.}, journal = {Journal of healthcare engineering}, volume = {2020}, number = {}, pages = {1051394}, pmid = {32399163}, issn = {2040-2309}, mesh = {*Algorithms ; Big Data ; *Cloud Computing ; Cluster Analysis ; Data Mining/*methods/statistics & numerical data ; }, abstract = {Medical data are characterized by particularity and complexity, and big data clustering plays a significant role in the area of medicine. Traditional clustering algorithms easily fall into local extrema, which generates clustering deviation and poor clustering performance. Therefore, in this paper we propose a new medical big data clustering algorithm based on a modified immune evolutionary method in a cloud computing environment to overcome these disadvantages. Firstly, we analyze the big data structure model in the cloud computing environment. Secondly, we describe the modified immune evolutionary method for clustering medical data in detail, including encoding, fitness-function construction, and genetic-operator selection. Finally, experiments show that this new approach can improve the accuracy of data classification, reduce the error rate, and improve the performance of data mining and feature extraction for medical data clustering.}, } @article {pmid32397423, year = {2020}, author = {Yangui, S}, title = {A Panorama of Cloud Platforms for IoT Applications Across Industries.}, journal = {Sensors (Basel, Switzerland)}, volume = {20}, number = {9}, pages = {}, pmid = {32397423}, issn = {1424-8220}, abstract = {Internet of Things (IoT) applications can play a critical role in business and industry. Industrial IoT (IIoT) refers to the use of IoT technologies in manufacturing. Enabling IIoT applications in cloud environments requires the design of appropriate IIoT Platform-as-a-Service (IIoT PaaS) offerings to support and ease their provisioning (i.e., development, deployment and management). This paper critically reviews the IIoT PaaS architectures proposed so far in the relevant literature. It surveys only the architectures that are suitable for IIoT application provisioning and excludes regular IoT solutions from its scope. The evaluation is based on a set of well-defined architectural requirements. It also introduces and discusses future challenges and research directions. The critical review covers the PaaS solutions that address the whole spectrum of IoT verticals as well as those dealing with specific IoT verticals. Existing limitations are identified and hints are provided on how to tackle them.
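[Editor's note] The Yu et al. algorithm above evolves candidate clusterings with immune-inspired genetic operators (encoding, fitness, selection). The toy numpy sketch below shows the generic encode/evaluate/select loop that such evolutionary clustering methods share; it is a minimal illustration under made-up data, not the authors' modified immune algorithm.

# Toy evolutionary clustering: individuals encode k centroids, fitness = -SSE.
import numpy as np

rng = np.random.default_rng(0)
data = rng.normal(size=(200, 2))          # stand-in for medical feature vectors
k, pop_size, gens = 3, 20, 50

def fitness(centroids):
    # Negative sum of squared distances from each point to its nearest centroid.
    d = ((data[:, None, :] - centroids[None, :, :]) ** 2).sum(-1)
    return -d.min(axis=1).sum()

pop = rng.normal(size=(pop_size, k, 2))   # encoding: k centroids per individual
for _ in range(gens):
    scores = np.array([fitness(ind) for ind in pop])
    elite = pop[np.argsort(scores)[-pop_size // 2:]]            # selection
    children = elite + rng.normal(scale=0.1, size=elite.shape)  # mutation
    pop = np.concatenate([elite, children])

best = pop[np.argmax([fitness(ind) for ind in pop])]
print("best centroids:\n", np.round(best, 2))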
As critical research directions, mechanisms that enable secure provisioning, as well as IIoT PaaS interaction with virtualized IoT Infrastructure-as-a-Service (IaaS) and the fog computing layer, are discussed.}, } @article {pmid32396074, year = {2020}, author = {Wu, Q and He, K and Chen, X}, title = {Personalized Federated Learning for Intelligent IoT Applications: A Cloud-Edge based Framework.}, journal = {IEEE computer graphics and applications}, volume = {}, number = {}, pages = {}, doi = {10.1109/OJCS.2020.2993259}, pmid = {32396074}, issn = {1558-1756}, abstract = {The Internet of Things (IoT) has widely penetrated different aspects of modern life, and many intelligent IoT services and applications are emerging. Recently, federated learning has been proposed to train a globally shared model by exploiting a massive amount of user-generated data samples on IoT devices while preventing data leakage. However, the device, statistical and model heterogeneities inherent in complex IoT environments pose great challenges to traditional federated learning, making it unsuitable for direct deployment. In this paper, we advocate a personalized federated learning framework in a cloud-edge architecture for intelligent IoT applications. To cope with the heterogeneity issues in IoT environments, we investigate emerging personalized federated learning methods that are able to mitigate the negative effects caused by heterogeneities in different aspects. With the power of edge computing, the requirements of intelligent IoT applications for fast processing and low latency can also be met. We finally provide a case study of IoT-based human activity recognition to demonstrate the effectiveness of personalized federated learning for intelligent IoT applications.}, } @article {pmid32382696, year = {2018}, author = {Ahmed, AE and Mpangase, PT and Panji, S and Baichoo, S and Souilmi, Y and Fadlelmola, FM and Alghali, M and Aron, S and Bendou, H and De Beste, E and Mbiyavanga, M and Souiai, O and Yi, L and Zermeno, J and Armstrong, D and O'Connor, BD and Mainzer, LS and Crusoe, MR and Meintjes, A and Van Heusden, P and Botha, G and Joubert, F and Jongeneel, CV and Hazelhurst, S and Mulder, N}, title = {Organizing and running bioinformatics hackathons within Africa: The H3ABioNet cloud computing experience.}, journal = {AAS open research}, volume = {1}, number = {}, pages = {9}, pmid = {32382696}, issn = {2515-9321}, support = {U24 HG006941/HG/NHGRI NIH HHS/United States ; U41 HG006941/HG/NHGRI NIH HHS/United States ; }, abstract = {The need for portable and reproducible genomics analysis pipelines is growing globally as well as in Africa, especially with the growth of collaborative projects like the Human Heredity and Health in Africa Consortium (H3Africa). The Pan-African H3Africa Bioinformatics Network (H3ABioNet) recognized the need for portable, reproducible pipelines adapted to heterogeneous computing environments, and for the nurturing of technical expertise in workflow languages and containerization technologies. Building on the network's Standard Operating Procedures (SOPs) for common genomic analyses, H3ABioNet arranged its first Cloud Computing and Reproducible Workflows Hackathon in 2016, with the purpose of translating those SOPs into analysis pipelines able to run on heterogeneous computing environments and meeting the needs of H3Africa research projects.
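[Editor's note] The Wu et al. framework above builds on federated averaging plus per-device personalization. A minimal numpy sketch of that two-step idea (global FedAvg rounds, then local fine-tuning) on a linear model follows; the data, model, and hyperparameters are made up, and this is not the paper's actual algorithm.

# Minimal FedAvg with a personalization step, on a linear model (illustrative only).
import numpy as np

rng = np.random.default_rng(1)
# Each device has slightly different ground-truth weights (statistical heterogeneity).
truths = [np.array([1.0, -2.0, 0.5]) + rng.normal(scale=0.3, size=3) for _ in range(4)]
devices = []
for w in truths:
    X = rng.normal(size=(50, 3))
    devices.append((X, X @ w + rng.normal(scale=0.1, size=50)))

def sgd(w, X, y, lr=0.05, steps=20):
    # Plain gradient descent on mean squared error.
    for _ in range(steps):
        w = w - lr * 2 * X.T @ (X @ w - y) / len(y)
    return w

global_w = np.zeros(3)
for _ in range(10):                        # federated rounds
    locals_ = [sgd(global_w.copy(), X, y) for X, y in devices]
    global_w = np.mean(locals_, axis=0)    # FedAvg: average the local updates

# Personalization: each device fine-tunes the global model on its own data.
personal = [sgd(global_w.copy(), X, y, steps=5) for X, y in devices]
print("global:", np.round(global_w, 2), "\ndevice 0:", np.round(personal[0], 2))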
This paper describes the preparations for this hackathon and reflects upon the lessons learned about its impact on building the technical and scientific expertise of African researchers. The workflows developed were made publicly available in GitHub repositories and deposited as container images on Quay.io.}, } @article {pmid32379124, year = {2020}, author = {Myers, TG and Ramkumar, PN and Ricciardi, BF and Urish, KL and Kipper, J and Ketonis, C}, title = {Artificial Intelligence and Orthopaedics: An Introduction for Clinicians.}, journal = {The Journal of bone and joint surgery. American volume}, volume = {102}, number = {9}, pages = {830-840}, pmid = {32379124}, issn = {1535-1386}, mesh = {*Artificial Intelligence ; Humans ; *Orthopedic Procedures ; *Orthopedics ; }, abstract = {➤. Artificial intelligence (AI) provides machines with the ability to perform tasks using algorithms governed by pattern recognition and self-correction on large amounts of data to narrow options in order to avoid errors. ➤. The 4 things necessary for AI in medicine include big data sets, powerful computers, cloud computing, and open source algorithmic development. ➤. The use of AI in health care continues to expand, and its impact on orthopaedic surgery can already be found in diverse areas such as image recognition, risk prediction, patient-specific payment models, and clinical decision-making. ➤. Just as the business of medicine was once considered outside the domain of the orthopaedic surgeon, emerging technologies such as AI warrant ownership, leverage, and application by the orthopaedic surgeon to improve the care that we provide to the patients we serve. ➤. AI could provide solutions to factors contributing to physician burnout and medical mistakes. However, challenges regarding the ethical deployment, regulation, and the clinical superiority of AI over traditional statistics and decision-making remain to be resolved.}, } @article {pmid32370129, year = {2020}, author = {Celesti, A and Ruggeri, A and Fazio, M and Galletta, A and Villari, M and Romano, A}, title = {Blockchain-Based Healthcare Workflow for Tele-Medical Laboratory in Federated Hospital IoT Clouds.}, journal = {Sensors (Basel, Switzerland)}, volume = {20}, number = {9}, pages = {}, pmid = {32370129}, issn = {1424-8220}, support = {GR-2016-02361306//Ministero della Salute/ ; }, mesh = {*Betacoronavirus ; Blockchain ; COVID-19 ; Cloud Computing ; Computer Security ; *Coronavirus Infections/prevention & control/transmission ; Humans ; *Pandemics/prevention & control ; *Pneumonia, Viral/prevention & control/transmission ; SARS-CoV-2 ; Telemedicine/*methods/*organization & administration ; }, abstract = {In a pandemic such as the COVID-19 outbreak we are living through at the time of writing, the need for tele-healthcare services becomes dramatically fundamental to reduce the movement of patients, thereby reducing the risk of infection. Leveraging recent Cloud computing and Internet of Things (IoT) technologies, this paper proposes a tele-medical laboratory service in which clinical exams are performed on patients directly in a hospital by technicians using IoT medical devices, and results are automatically sent via the hospital Cloud to doctors of federated hospitals for validation and/or consultation.
In particular, we discuss a distributed scenario where nurses, technicians and medical doctors belonging to different hospitals cooperate through their federated hospital Clouds to form a virtual health team able to carry out a healthcare workflow in a secure fashion, leveraging the intrinsic security features of Blockchain technology. Specifically, both public and hybrid Blockchain scenarios are discussed and assessed using the Ethereum platform.}, } @article {pmid32365815, year = {2020}, author = {Vilela, PH and Rodrigues, JJPC and Righi, RDR and Kozlov, S and Rodrigues, VF}, title = {Looking at Fog Computing for E-Health through the Lens of Deployment Challenges and Applications.}, journal = {Sensors (Basel, Switzerland)}, volume = {20}, number = {9}, pages = {}, pmid = {32365815}, issn = {1424-8220}, support = {Project UIDB/EEA/50008/2020//FCT/MCTES/ ; Grant 08-08//Government of the Russian Federation/ ; Grant No. 431726/2018-3//Conselho Nacional de Desenvolvimento Científico e Tecnológico/ ; Grants No. 309335/2017-5//Conselho Nacional de Desenvolvimento Científico e Tecnológico/ ; }, mesh = {*Cloud Computing ; Humans ; *Lenses ; Monitoring, Physiologic ; Privacy ; *Telemedicine ; }, abstract = {Fog computing is a distributed infrastructure in which specific resources are managed at the network border using cloud computing principles and technologies. In contrast to traditional cloud computing, fog computing supports latency-sensitive applications with less energy consumption and a reduced amount of data traffic. A fog device is placed at the network border, allowing data collection and processing to be physically close to the end-users. This characteristic is essential for applications that can benefit from improved latency and response time. In particular, in the e-Health field, many solutions rely on real-time data to monitor environments, patients, and/or medical staff, aiming at improving processes and safety. Therefore, fog computing can play an important role in such environments, providing a low-latency infrastructure. The main goal of the current research is to present fog computing strategies focused on electronic-Health (e-Health) applications. To the best of our knowledge, this article is the first to review the applications and challenges of e-Health fog computing. We introduce some of the available e-Health solutions in the literature that focus on latency, security, privacy, energy efficiency, and resource management techniques. Additionally, we discuss communication protocols and technologies, detailing both in an architectural overview from the edge devices up to the cloud. Unlike traditional cloud computing, the fog concept demonstrates better performance in terms of time-sensitive requirements and network data traffic. Finally, based on the evaluation of the current technologies for e-Health, open research issues and challenges are identified, and further research directions are proposed.}, } @article {pmid32365040, year = {2022}, author = {Liu, GP}, title = {Coordinated Control of Networked Multiagent Systems via Distributed Cloud Computing Using Multistep State Predictors.}, journal = {IEEE transactions on cybernetics}, volume = {52}, number = {2}, pages = {810-820}, doi = {10.1109/TCYB.2020.2985043}, pmid = {32365040}, issn = {2168-2275}, abstract = {This article studies the coordinated control problem of networked multiagent systems via distributed cloud computing.
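[Editor's note] The Celesti et al. workflow above relies on blockchain's tamper-evidence to secure a multi-hospital exam-validation trail. The standard-library Python sketch below illustrates the underlying hash-chaining idea on a healthcare audit log; it is a conceptual illustration only, not the paper's Ethereum contracts, and the event strings are invented.

# Hash-chained audit log: each record commits to the previous one (tamper-evident).
import hashlib, json, time

def add_block(chain, event):
    prev = chain[-1]["hash"] if chain else "0" * 64
    body = {"event": event, "ts": time.time(), "prev": prev}
    body["hash"] = hashlib.sha256(json.dumps(body, sort_keys=True).encode()).hexdigest()
    chain.append(body)

def verify(chain):
    for i, block in enumerate(chain):
        expected = {k: v for k, v in block.items() if k != "hash"}
        h = hashlib.sha256(json.dumps(expected, sort_keys=True).encode()).hexdigest()
        if h != block["hash"] or (i and block["prev"] != chain[i - 1]["hash"]):
            return False
    return True

log = []
add_block(log, "technician T1 uploaded exam E42")
add_block(log, "doctor D7 validated exam E42")
print(verify(log))            # True
log[0]["event"] = "tampered"  # any edit breaks every later link
print(verify(log))            # False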
A distributed cloud predictive control scheme is proposed to achieve the desired coordination control performance and to compensate actively for communication delays between the cloud computing nodes and between the agents. This scheme includes the design of a multistep state predictor and the optimization of control coordination. The multistep state predictor provides a novel way of predicting future immeasurable states of agents over a large horizon length. The optimization of control coordination minimizes distributed cost functions, introduced to measure the coordination between agents, so that the optimal design of the coordination controllers is simple, with little computational increase for large-scale networked multiagent systems. Further analysis derives conditions for simultaneous stability and consensus of the closed-loop networked multiagent systems using the distributed cloud predictive control scheme. The effectiveness of the proposed scheme is illustrated by an example.}, } @article {pmid32356920, year = {2020}, author = {Luo, J and Chen, C and Li, Q}, title = {White blood cell counting at point-of-care testing: A review.}, journal = {Electrophoresis}, volume = {41}, number = {16-17}, pages = {1450-1468}, doi = {10.1002/elps.202000029}, pmid = {32356920}, issn = {1522-2683}, mesh = {Equipment Design ; Humans ; *Leukocyte Count ; *Microfluidic Analytical Techniques ; *Point-of-Care Systems ; }, abstract = {White blood cells, also called leukocytes, are cells of the immune system involved in protecting the body against infections and foreign invaders. Conventional methods of leukocyte analysis provide valuable and accurate information to medical specialists. Diagnosing a disease requires a combination of multiple biomarkers; in some settings, however, such as personal health care, full laboratory analysis occupies medical resources and causes unnecessary consumption. Traditional methods of WBC counting (such as flow cytometry) are time- and labor-consuming. Even gold-standard (flow-based fractionation/micropore filtration) or improved filtration methods for WBC counting remain lengthy and can lead to membrane fouling due to the rapid accumulation of biological material. Therefore, the analysis of WBC counts requires more compact and efficient equipment. Microfluidic technologies for leukocyte counting and analysis, powered by different fields (force, thermal, acoustic, optical, magnetic) and other methods, are much more cost-efficient and can be used at home or in resource-limited areas to achieve point-of-care (POC) testing. In this review, we first highlight the mainstream devices that have been commercialized and extensively employed for WBC counting in patients. Next, we present some recent developments in leukocyte counting (mainly microfluidic technologies) and comment on their relative merits. We focus on the possibility of achieving POC testing and aim to help researchers tackle individual challenges accordingly.
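[Editor's note] Liu's scheme above bridges network delay by predicting agent states several steps ahead. A minimal numpy sketch of an m-step predictor for a linear system x_{k+1} = A x_k + B u_k follows; the dynamics matrices and control sequence are invented, and the paper's predictor additionally handles immeasurable states and coordination terms.

# m-step-ahead state prediction for x_{k+1} = A x_k + B u_k, to bridge a network delay.
import numpy as np

A = np.array([[1.0, 0.1], [0.0, 1.0]])   # illustrative agent dynamics
B = np.array([[0.0], [0.1]])

def predict(x, planned_u, m):
    # Roll the model forward m steps using the control sequence already in flight.
    for j in range(m):
        x = A @ x + B @ planned_u[j]
    return x

x_k = np.array([1.0, 0.0])
planned_u = [np.array([0.5]) for _ in range(3)]  # controls already sent to the agent
x_pred = predict(x_k, planned_u, m=3)            # state expected after a 3-step delay
print(np.round(x_pred, 3))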
Finally, we discuss technologies that complement existing detection devices, such as image recognition and cloud computing, which we believe have great potential to further promote real-time detection and improve medical diagnosis.}, } @article {pmid32349242, year = {2020}, author = {Kayes, ASM and Kalaria, R and Sarker, IH and Islam, MS and Watters, PA and Ng, A and Hammoudeh, M and Badsha, S and Kumara, I}, title = {A Survey of Context-Aware Access Control Mechanisms for Cloud and Fog Networks: Taxonomy and Open Research Issues.}, journal = {Sensors (Basel, Switzerland)}, volume = {20}, number = {9}, pages = {}, pmid = {32349242}, issn = {1424-8220}, abstract = {Over the last few decades, the proliferation of the Internet of Things (IoT) has produced an overwhelming flow of data and services, which has shifted the access control paradigm from a fixed desktop environment to dynamic cloud environments. Fog computing is associated with a new access control paradigm to reduce the overhead costs by moving the execution of application logic from the centre of the cloud data sources to the periphery of the IoT-oriented sensor networks. Indeed, accessing information and data resources from a variety of IoT sources has been plagued with inherent problems such as data heterogeneity, privacy, security and computational overheads. This paper presents an extensive survey of security, privacy and access control research, while highlighting several specific concerns in a wide range of contextual conditions (e.g., spatial, temporal and environmental contexts) which are gaining a lot of momentum in the area of industrial sensor and cloud networks. We present different taxonomies, such as contextual conditions and authorization models, based on the key issues in this area and discuss the existing context-sensitive access control approaches to tackle the aforementioned issues. With the aim of reducing administrative and computational overheads in the IoT sensor networks, we propose a new-generation Fog-Based Context-Aware Access Control (FB-CAAC) framework, combining the benefits of cloud, IoT and context-aware computing, and ensuring proper access control and security at the edge of the end-devices. Our goal is not only to control context-sensitive access to data resources in the cloud, but also to move the execution of application logic from the cloud level to an intermediary level where necessary, by adding computational nodes at the edge of the IoT sensor network. A discussion of some open research issues pertaining to context-sensitive access control to data resources is provided, including several real-world case studies.
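[Editor's note] The FB-CAAC framework just described grants access only when contextual conditions (e.g., spatial and temporal) hold alongside the usual role check. A small Python sketch of that policy-evaluation pattern follows; the policy fields, roles, and zones are hypothetical examples, not taken from the paper.

# Context-aware access check: role AND contextual conditions must all hold.
from datetime import time

POLICIES = [  # hypothetical policy entries
    {"role": "nurse", "resource": "vitals", "zones": {"ward-3"},
     "hours": (time(7, 0), time(19, 0))},
]

def is_permitted(role, resource, zone, now):
    for p in POLICIES:
        if (p["role"] == role and p["resource"] == resource
                and zone in p["zones"] and p["hours"][0] <= now <= p["hours"][1]):
            return True
    return False

print(is_permitted("nurse", "vitals", "ward-3", time(9, 30)))   # True
print(is_permitted("nurse", "vitals", "lobby", time(9, 30)))    # False: wrong zone
print(is_permitted("nurse", "vitals", "ward-3", time(23, 0)))   # False: outside hours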
We conclude the paper with an in-depth analysis of the research challenges that have not been adequately addressed in the literature and highlight directions for future work not yet well covered by currently available research.}, } @article {pmid32348265, year = {2020}, author = {Ismail, L and Materwala, H and Karduck, AP and Adem, A}, title = {Requirements of Health Data Management Systems for Biomedical Care and Research: Scoping Review.}, journal = {Journal of medical Internet research}, volume = {22}, number = {7}, pages = {e17508}, pmid = {32348265}, issn = {1438-8871}, mesh = {Biomedical Research/*methods ; Data Management/*methods ; Delivery of Health Care/*methods ; Humans ; }, abstract = {BACKGROUND: Over the last century, disruptive incidents in the fields of clinical and biomedical research have yielded a tremendous change in health data management systems. This is due to a number of breakthroughs in the medical field and the need for big data analytics and the Internet of Things (IoT) to be incorporated in a real-time smart health information management system. In addition, the requirements of patient care have evolved over time, allowing for more accurate prognoses and diagnoses. In this paper, we discuss the temporal evolution of health data management systems and capture the requirements that led to the development of a given system over a certain period of time. Consequently, we provide insights into those systems and give suggestions and research directions on how they can be improved for a better health care system.

OBJECTIVE: This study aimed to show that there is a need for a secure and efficient health data management system that will allow physicians and patients to update decentralized medical records and to analyze the medical data for supporting more precise diagnoses, prognoses, and public insights. Limitations of existing health data management systems were analyzed.

METHODS: To study the evolution and requirements of health data management systems over the years, a search was conducted to obtain research articles and information on medical lawsuits, health regulations, and acts. These materials were obtained from the Institute of Electrical and Electronics Engineers, the Association for Computing Machinery, Elsevier, MEDLINE, PubMed, Scopus, and Web of Science databases.

RESULTS: Health data management systems have undergone a disruptive transformation over the years from paper to computer, web, cloud, IoT, big data analytics, and finally to blockchain. The requirements of a health data management system revealed by the evolving definitions of medical records and their management are (1) medical record data, (2) real-time data access, (3) patient participation, (4) data sharing, (5) data security, (6) patient identity privacy, and (7) public insights. This paper reviewed health data management systems based on these 7 requirements across studies conducted over the years. To our knowledge, this is the first analysis of the temporal evolution of health data management systems giving insights into the system requirements for better health care.

CONCLUSIONS: There is a need for a comprehensive real-time health data management system that allows physicians, patients, and external users to input their medical and lifestyle data into the system. The incorporation of big data analytics will aid in better prognosis, diagnosis, and prediction of diseases. The prediction results will help in the development of an effective prevention plan.}, } @article {pmid32344803, year = {2020}, author = {Azghiou, K and El Mouhib, M and Koulali, MA and Benali, A}, title = {An End-to-End Reliability Framework of the Internet of Things.}, journal = {Sensors (Basel, Switzerland)}, volume = {20}, number = {9}, pages = {}, pmid = {32344803}, issn = {1424-8220}, abstract = {The Internet of Things (IoT) paradigm draws on many scientific and engineering fields, which entails great diversity and heterogeneity in its underlying systems. When considering End-to-End IoT systems, we can identify the emergence of new classes of problems. The best-known are those associated with standardization for better interoperability and compatibility of these systems, and those that gave birth to new paradigms such as Fog Computing. Predicting the reliability of an End-to-End IoT system is a problem belonging to this category. On the one hand, predicting reliability is often mandatory before the deployment stage. On the other hand, it may help engineers at the design and operational stages establish effective maintenance policies, and it may provide the various stakeholders and decision-makers a means to take relevant actions. The literature contains works that consider only fragments of End-to-End IoT systems, such as those assessing reliability for Wireless Sensor Networks (WSN) or Cloud subsystems, to cite just a few. Other works are specific to well-defined industries, such as those targeting the reliability of E-health and Smart-Grid infrastructures. Works that aim to assess the reliability of an End-to-End IoT system are remarkably rare and particularly limited in terms of expressiveness, flexibility, and implementation time complexity. In this paper, we apply the Reliability Block Diagram (RBD) paradigm to set up a framework for End-to-End IoT system reliability modeling and analysis. Our contribution is four-fold: we propose an IoT network-based layered architecture, we model in depth each layer of the proposed architecture, we suggest a flow chart to deploy the proposed framework, and we perform a numerical investigation of simplified scenarios. We affirm that the proposed framework is expressive, flexible, and scalable. The numerical study reveals mission time intervals which characterize the behavior of an IoT system from the point of view of its reliability.}, } @article {pmid32343907, year = {2020}, author = {Tsur, EE}, title = {Computer-Aided Design of Microfluidic Circuits.}, journal = {Annual review of biomedical engineering}, volume = {22}, number = {}, pages = {285-307}, doi = {10.1146/annurev-bioeng-082219-033358}, pmid = {32343907}, issn = {1545-4274}, mesh = {Algorithms ; Animals ; Benchmarking ; *Computer-Aided Design ; *Diagnosis, Computer-Assisted ; Equipment Design ; Humans ; *Lab-On-A-Chip Devices ; Machine Learning ; Microfluidic Analytical Techniques/methods ; *Microfluidics ; Software ; }, abstract = {Microfluidic devices developed over the past decade feature greater intricacy, increased performance requirements, new materials, and innovative fabrication methods.
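[Editor's note] The Azghiou et al. framework above composes subsystem reliabilities with Reliability Block Diagrams. The sketch below shows the two standard RBD composition rules applied to exponential lifetime models; the component failure rates and the sensor/gateway/cloud chain are made-up numbers for illustration, not values from the paper.

# Reliability Block Diagram basics: series and parallel composition of reliabilities.
import math

def rel(lam, t):
    # Exponential lifetime model: R(t) = exp(-lambda * t)
    return math.exp(-lam * t)

def series(rs):      # all blocks must work
    out = 1.0
    for r in rs:
        out *= r
    return out

def parallel(rs):    # at least one block must work (redundancy)
    out = 1.0
    for r in rs:
        out *= (1.0 - r)
    return 1.0 - out

t = 1000.0  # mission time in hours (illustrative)
sensor, gateway, cloud = rel(2e-5, t), rel(1e-5, t), rel(5e-6, t)
# End-to-end chain with two redundant gateways: sensor -> (gw || gw) -> cloud
print(round(series([sensor, parallel([gateway, gateway]), cloud]), 4))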
Consequently, new algorithmic and design approaches have been developed to introduce optimization and computer-aided design to microfluidic circuits: from conceptualization to specification, synthesis, realization, and refinement. The field includes the development of new description languages, optimization methods, benchmarks, and integrated design tools. Here, recent advancements in the computer-aided design of flow-, droplet-, and paper-based microfluidics are reviewed. A case study of the design of resistive microfluidic networks is discussed in detail. The review concludes with perspectives on the future of computer-aided microfluidics design, including the introduction of cloud computing, machine learning, new ideation processes, and hybrid optimization.}, } @article {pmid32340971, year = {2020}, author = {Yang, G and Pang, Z and Jamal Deen, M and Dong, M and Zhang, YT and Lovell, N and Rahmani, AM}, title = {Homecare Robotic Systems for Healthcare 4.0: Visions and Enabling Technologies.}, journal = {IEEE journal of biomedical and health informatics}, volume = {24}, number = {9}, pages = {2535-2549}, doi = {10.1109/JBHI.2020.2990529}, pmid = {32340971}, issn = {2168-2208}, mesh = {*Artificial Intelligence ; Cloud Computing ; Delivery of Health Care ; Humans ; *Robotic Surgical Procedures ; }, abstract = {Powered by technologies that originated in manufacturing, the fourth revolution of healthcare technologies is happening (Healthcare 4.0). As an example of this revolution, new-generation homecare robotic systems (HRS) based on cyber-physical systems (CPS) with higher speed and more intelligent execution are emerging. In this article, the new visions and features of CPS-based HRS are proposed. The latest progress in related enabling technologies is reviewed, including a